## [1] "N subjects with LGI = 0: 8"
## [1] "SUBJ081" "SUBJ082" "SUBJ139" "SUBJ140" "SUBJ211" "SUBJ212" "SUBJ223"
## [8] "SUBJ225"
| machine | SUBJ | Session | birthday | acq_date | Age | SUBJ_clean | Gender | Birthdate | Diagnostic | ESC | NeuroQuant | Lipoxina | Age_interval | Age_interval10 | ConvexHullArea | PialFullArea | WhiteFullArea | SmoothPialFullArea | ConvexHullFullArea | PialFullVol | WhiteFullVol | SmoothPialFullVol | hemi | ROI | AvgThickness | logAvgThickness | TotalArea | logTotalArea | logTotalFullArea | ExposedArea | logExposedArea | WhiteSurfArea | logWhiteSurfArea | GMvolume | logConvexHullArea | localGI | k | K | S | I | Knorm | Snorm | GaussianCurvature | PialVol | WhiteVol | c | TotalArea_corrected | ExposedArea_corrected | localGI_corrected | logTotalArea_corrected | logExposedArea_corrected | k_corrected | K_corrected | I_corrected | S_corrected |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Philips-Achieva | SUBJ081 | 1 | 1942-01-23 | 2013-01-24 | 71.05205 | SUBJ081 | FEM | 23/01/1942 | CONTROLE | 3 | NQ_2.0 | NA | 71-75 | 70 | NA | NA | NA | NA | NA | NA | NA | NA | L | F | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ081 | 1 | 1942-01-23 | 2013-01-24 | 71.05205 | SUBJ081 | FEM | 23/01/1942 | CONTROLE | 3 | NQ_2.0 | NA | 71-75 | 70 | NA | NA | NA | NA | NA | NA | NA | NA | R | F | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ081 | 1 | 1942-01-23 | 2013-01-24 | 71.05205 | SUBJ081 | FEM | 23/01/1942 | CONTROLE | 3 | NQ_2.0 | NA | 71-75 | 70 | NA | NA | NA | NA | NA | NA | NA | NA | L | P | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ081 | 1 | 1942-01-23 | 2013-01-24 | 71.05205 | SUBJ081 | FEM | 23/01/1942 | CONTROLE | 3 | NQ_2.0 | NA | 71-75 | 70 | NA | NA | NA | NA | NA | NA | NA | NA | R | P | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ081 | 1 | 1942-01-23 | 2013-01-24 | 71.05205 | SUBJ081 | FEM | 23/01/1942 | CONTROLE | 3 | NQ_2.0 | NA | 71-75 | 70 | NA | NA | NA | NA | NA | NA | NA | NA | L | T | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ081 | 1 | 1942-01-23 | 2013-01-24 | 71.05205 | SUBJ081 | FEM | 23/01/1942 | CONTROLE | 3 | NQ_2.0 | NA | 71-75 | 70 | NA | NA | NA | NA | NA | NA | NA | NA | R | T | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ081 | 1 | 1942-01-23 | 2013-01-24 | 71.05205 | SUBJ081 | FEM | 23/01/1942 | CONTROLE | 3 | NQ_2.0 | NA | 71-75 | 70 | NA | NA | NA | NA | NA | NA | NA | NA | L | O | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ081 | 1 | 1942-01-23 | 2013-01-24 | 71.05205 | SUBJ081 | FEM | 23/01/1942 | CONTROLE | 3 | NQ_2.0 | NA | 71-75 | 70 | NA | NA | NA | NA | NA | NA | NA | NA | R | O | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ082 | 1 | 1963-01-21 | 2013-01-17 | 50.02466 | SUBJ082 | MASC | 21/01/1963 | CONTROLE | 16 | NQ_2.0 | NA | 51-55 | 50 | NA | NA | NA | NA | NA | NA | NA | NA | L | F | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ082 | 1 | 1963-01-21 | 2013-01-17 | 50.02466 | SUBJ082 | MASC | 21/01/1963 | CONTROLE | 16 | NQ_2.0 | NA | 51-55 | 50 | NA | NA | NA | NA | NA | NA | NA | NA | L | P | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ082 | 1 | 1963-01-21 | 2013-01-17 | 50.02466 | SUBJ082 | MASC | 21/01/1963 | CONTROLE | 16 | NQ_2.0 | NA | 51-55 | 50 | NA | NA | NA | NA | NA | NA | NA | NA | L | T | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ082 | 1 | 1963-01-21 | 2013-01-17 | 50.02466 | SUBJ082 | MASC | 21/01/1963 | CONTROLE | 16 | NQ_2.0 | NA | 51-55 | 50 | NA | NA | NA | NA | NA | NA | NA | NA | L | O | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ139 | 1 | 1946-09-01 | 2013-09-24 | 67.10959 | SUBJ139 | FEM | 01/09/1946 | CONTROLE | 16 | NQ_2.0 | 2.82 | 66-70 | 60 | NA | NA | NA | NA | NA | NA | NA | NA | L | F | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ139 | 1 | 1946-09-01 | 2013-09-24 | 67.10959 | SUBJ139 | FEM | 01/09/1946 | CONTROLE | 16 | NQ_2.0 | 2.82 | 66-70 | 60 | NA | NA | NA | NA | NA | NA | NA | NA | R | F | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ139 | 1 | 1946-09-01 | 2013-09-24 | 67.10959 | SUBJ139 | FEM | 01/09/1946 | CONTROLE | 16 | NQ_2.0 | 2.82 | 66-70 | 60 | NA | NA | NA | NA | NA | NA | NA | NA | L | P | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ139 | 1 | 1946-09-01 | 2013-09-24 | 67.10959 | SUBJ139 | FEM | 01/09/1946 | CONTROLE | 16 | NQ_2.0 | 2.82 | 66-70 | 60 | NA | NA | NA | NA | NA | NA | NA | NA | R | P | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ139 | 1 | 1946-09-01 | 2013-09-24 | 67.10959 | SUBJ139 | FEM | 01/09/1946 | CONTROLE | 16 | NQ_2.0 | 2.82 | 66-70 | 60 | NA | NA | NA | NA | NA | NA | NA | NA | L | T | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ139 | 1 | 1946-09-01 | 2013-09-24 | 67.10959 | SUBJ139 | FEM | 01/09/1946 | CONTROLE | 16 | NQ_2.0 | 2.82 | 66-70 | 60 | NA | NA | NA | NA | NA | NA | NA | NA | R | T | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ139 | 1 | 1946-09-01 | 2013-09-24 | 67.10959 | SUBJ139 | FEM | 01/09/1946 | CONTROLE | 16 | NQ_2.0 | 2.82 | 66-70 | 60 | NA | NA | NA | NA | NA | NA | NA | NA | L | O | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ139 | 1 | 1946-09-01 | 2013-09-24 | 67.10959 | SUBJ139 | FEM | 01/09/1946 | CONTROLE | 16 | NQ_2.0 | 2.82 | 66-70 | 60 | NA | NA | NA | NA | NA | NA | NA | NA | R | O | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ140 | 1 | 1958-02-22 | 2013-09-10 | 55.58630 | SUBJ140 | MASC | 22/02/1958 | CONTROLE | 16 | NQ_2.0 | 134.07 | 56-60 | 50 | NA | NA | NA | NA | NA | NA | NA | NA | L | F | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ140 | 1 | 1958-02-22 | 2013-09-10 | 55.58630 | SUBJ140 | MASC | 22/02/1958 | CONTROLE | 16 | NQ_2.0 | 134.07 | 56-60 | 50 | NA | NA | NA | NA | NA | NA | NA | NA | L | P | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ140 | 1 | 1958-02-22 | 2013-09-10 | 55.58630 | SUBJ140 | MASC | 22/02/1958 | CONTROLE | 16 | NQ_2.0 | 134.07 | 56-60 | 50 | NA | NA | NA | NA | NA | NA | NA | NA | L | T | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ140 | 1 | 1958-02-22 | 2013-09-10 | 55.58630 | SUBJ140 | MASC | 22/02/1958 | CONTROLE | 16 | NQ_2.0 | 134.07 | 56-60 | 50 | NA | NA | NA | NA | NA | NA | NA | NA | L | O | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ211 | 1 | 1936-08-13 | 2014-10-07 | 78.20274 | SUBJ211 | FEM | 13/08/1936 | CCL | 13 | NA | 124.75 | 76-80 | 70 | NA | NA | NA | NA | NA | NA | NA | NA | R | F | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ211 | 1 | 1936-08-13 | 2014-10-07 | 78.20274 | SUBJ211 | FEM | 13/08/1936 | CCL | 13 | NA | 124.75 | 76-80 | 70 | NA | NA | NA | NA | NA | NA | NA | NA | R | P | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ211 | 1 | 1936-08-13 | 2014-10-07 | 78.20274 | SUBJ211 | FEM | 13/08/1936 | CCL | 13 | NA | 124.75 | 76-80 | 70 | NA | NA | NA | NA | NA | NA | NA | NA | R | T | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ211 | 1 | 1936-08-13 | 2014-10-07 | 78.20274 | SUBJ211 | FEM | 13/08/1936 | CCL | 13 | NA | 124.75 | 76-80 | 70 | NA | NA | NA | NA | NA | NA | NA | NA | R | O | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ212 | 1 | 1937-07-31 | 2014-09-02 | 77.14247 | SUBJ212 | MASC | 31/07/1937 | CCL | 13 | NQ_2.2 | 94.90 | 76-80 | 70 | NA | NA | NA | NA | NA | NA | NA | NA | L | F | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ212 | 1 | 1937-07-31 | 2014-09-02 | 77.14247 | SUBJ212 | MASC | 31/07/1937 | CCL | 13 | NQ_2.2 | 94.90 | 76-80 | 70 | NA | NA | NA | NA | NA | NA | NA | NA | L | P | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ212 | 1 | 1937-07-31 | 2014-09-02 | 77.14247 | SUBJ212 | MASC | 31/07/1937 | CCL | 13 | NQ_2.2 | 94.90 | 76-80 | 70 | NA | NA | NA | NA | NA | NA | NA | NA | L | T | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ212 | 1 | 1937-07-31 | 2014-09-02 | 77.14247 | SUBJ212 | MASC | 31/07/1937 | CCL | 13 | NQ_2.2 | 94.90 | 76-80 | 70 | NA | NA | NA | NA | NA | NA | NA | NA | L | O | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ223 | 1 | 1951-06-05 | 2015-02-10 | 63.72877 | SUBJ223 | MASC | 05/06/1951 | CCL | 16 | NQ_2.2 | 221.21 | 61-65 | 60 | NA | NA | NA | NA | NA | NA | NA | NA | R | F | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ223 | 1 | 1951-06-05 | 2015-02-10 | 63.72877 | SUBJ223 | MASC | 05/06/1951 | CCL | 16 | NQ_2.2 | 221.21 | 61-65 | 60 | NA | NA | NA | NA | NA | NA | NA | NA | R | P | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ223 | 1 | 1951-06-05 | 2015-02-10 | 63.72877 | SUBJ223 | MASC | 05/06/1951 | CCL | 16 | NQ_2.2 | 221.21 | 61-65 | 60 | NA | NA | NA | NA | NA | NA | NA | NA | R | T | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ223 | 1 | 1951-06-05 | 2015-02-10 | 63.72877 | SUBJ223 | MASC | 05/06/1951 | CCL | 16 | NQ_2.2 | 221.21 | 61-65 | 60 | NA | NA | NA | NA | NA | NA | NA | NA | R | O | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ225 | 1 | 1947-01-18 | 2015-02-24 | 68.14795 | SUBJ225 | MASC | NA | CONTROLE | 16 | NA | 122.70 | 66-70 | 60 | NA | NA | NA | NA | NA | NA | NA | NA | L | F | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ225 | 1 | 1947-01-18 | 2015-02-24 | 68.14795 | SUBJ225 | MASC | NA | CONTROLE | 16 | NA | 122.70 | 66-70 | 60 | NA | NA | NA | NA | NA | NA | NA | NA | L | P | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ225 | 1 | 1947-01-18 | 2015-02-24 | 68.14795 | SUBJ225 | MASC | NA | CONTROLE | 16 | NA | 122.70 | 66-70 | 60 | NA | NA | NA | NA | NA | NA | NA | NA | L | T | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Philips-Achieva | SUBJ225 | 1 | 1947-01-18 | 2015-02-24 | 68.14795 | SUBJ225 | MASC | NA | CONTROLE | 16 | NA | 122.70 | 66-70 | 60 | NA | NA | NA | NA | NA | NA | NA | NA | L | O | 0 | -Inf | 0 | -Inf | NA | 0 | -Inf | 0 | -Inf | 0 | NA | NaN | NaN | NaN | NaN | -Inf | NaN | NaN | 0 | 0 | 0 | Inf | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
## [1] "Escolaridade mínima = 4. N de sujeitos com escolaridade = 4 anos: 3"
## [1] "N de sujeitos com escolaridade = 5 anos: 1"
## [1] "N de sujeitos com escolaridade = 7 anos: 1"
| machine | Diagnostic | N | Mean | Max | Min | Median | Std |
|---|---|---|---|---|---|---|---|
| Philips-Achieva | ALZ | 13 | 77.08 | 85.76 | 63.28 | 79.63 | 6.04 |
| Philips-Achieva | CCL | 39 | 72.41 | 83.27 | 61.68 | 71.79 | 4.91 |
| Philips-Achieva | CCL A DU+TBIP | 1 | 76.79 | 76.79 | 76.79 | 76.79 | 0.00 |
| Philips-Achieva | CCL A MD +tab possivel | 1 | 68.76 | 68.76 | 68.76 | 68.76 | 0.00 |
| Philips-Achieva | CCL A MD+PARKINSON | 1 | 75.85 | 75.85 | 75.85 | 75.85 | 0.00 |
| Philips-Achieva | CONTROLE | 80 | 66.04 | 80.35 | 42.53 | 67.95 | 8.33 |
## [1] "N sujeitos = 135"
## [1] "N sujeitos Philips = 135"
## [1] "N sujeitos Philips CTL, MCI e AD = 132"
| Diagnostic | machine | N | Mean | Max | Min | Median | Std |
|---|---|---|---|---|---|---|---|
| ALZ | Philips-Achieva | 13 | 77.08 | 85.76 | 63.28 | 79.63 | 6.04 |
| CCL | Philips-Achieva | 39 | 72.39 | 83.27 | 61.68 | 71.79 | 4.85 |
| CONTROLE | Philips-Achieva | 78 | 66.10 | 80.35 | 42.53 | 68.07 | 8.35 |
## [1] "N sujeitos = 130"
| Diagnostic | N | age | age_range | ESC | COGNITIVE_INDEX | TAU | AB1_40 | AB1_42 | Lipoxin | AvgT | AT | AE | k | K | S | I |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| AD | 13 | 77 ± 6.1 | 63 ; 86 | 13 ± 3 | -3.4 ± 1.5 | NA ± NA | NA ± NA | NA ± NA | NA ± NA | 2.4 ± 0.079 | 95000 ± 9300 | 37000 ± 3000 | 0.28 ± 0.01 | -0.55 ± 0.015 | 9.2 ± 0.13 | 10 ± 0.069 |
| MCI | 33 | 72 ± 4.6 | 62 ; 82 | 13 ± 2.4 | NA ± NA | NA ± NA | NA ± NA | NA ± NA | NA ± NA | 2.5 ± 0.085 | 97000 ± 8500 | 37000 ± 2800 | 0.29 ± 0.0096 | -0.53 ± 0.014 | 9.2 ± 0.12 | 10 ± 0.063 |
| CTL | 77 | 66 ± 8.4 | 43 ; 80 | 15 ± 2.2 | NA ± NA | NA ± NA | NA ± NA | NA ± NA | NA ± NA | 2.5 ± 0.099 | 98000 ± 7800 | 37000 ± 2400 | 0.3 ± 0.0095 | -0.52 ± 0.014 | 9.1 ± 0.1 | 10 ± 0.072 |
| Gender | Diagnostic | N | age | age_range | ESC | COGNITIVE_INDEX | TAU | AB1_40 | AB1_42 | Lipoxin | AvgT | AT | AE | k | K | S | I |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| FEM | AD | 8 | 75 ± 6.4 | 63 ; 82 | 12 ± 3 | -3.3 ± 1.6 | NA ± NA | NA ± NA | NA ± NA | NA ± NA | 2.4 ± 0.073 | 90000 ± 6100 | 35000 ± 1500 | 0.29 ± 0.0094 | -0.54 ± 0.014 | 9.1 ± 0.097 | 10 ± 0.045 |
| FEM | MCI | 19 | 72 ± 5.4 | 62 ; 82 | 13 ± 2.1 | NA ± NA | NA ± NA | NA ± NA | NA ± NA | NA ± NA | 2.5 ± 0.082 | 94000 ± 5800 | 36000 ± 2100 | 0.3 ± 0.0091 | -0.53 ± 0.013 | 9.1 ± 0.093 | 10 ± 0.056 |
| FEM | CTL | 53 | 66 ± 8 | 43 ; 80 | 15 ± 2.3 | NA ± NA | NA ± NA | NA ± NA | NA ± NA | NA ± NA | 2.5 ± 0.11 | 96000 ± 6300 | 37000 ± 1900 | 0.3 ± 0.0092 | -0.52 ± 0.013 | 9.1 ± 0.1 | 10 ± 0.067 |
| MASC | AD | 5 | 80 ± 5 | 71 ; 86 | 14 ± 2.1 | -3.4 ± 1.3 | NA ± NA | NA ± NA | NA ± NA | NA ± NA | 2.4 ± 0.087 | 1e+05 ± 6800 | 40000 ± 2100 | 0.28 ± 0.01 | -0.55 ± 0.016 | 9.3 ± 0.11 | 10 ± 0.048 |
| MASC | MCI | 14 | 73 ± 3.3 | 68 ; 80 | 14 ± 2.8 | -1.6 ± 1 | NA ± NA | NA ± NA | NA ± NA | NA ± NA | 2.4 ± 0.078 | 1e+05 ± 9000 | 39000 ± 2800 | 0.29 ± 0.0095 | -0.54 ± 0.014 | 9.2 ± 0.12 | 10 ± 0.061 |
| MASC | CTL | 24 | 65 ± 9.3 | 48 ; 77 | 15 ± 1.9 | 0.21 ± 0.63 | NA ± NA | NA ± NA | NA ± NA | NA ± NA | 2.5 ± 0.06 | 1e+05 ± 7900 | 39000 ± 2300 | 0.3 ± 0.01 | -0.53 ± 0.015 | 9.2 ± 0.077 | 10 ± 0.064 |
| Df | Sum Sq | Mean Sq | F value | Pr(>F) | |
|---|---|---|---|---|---|
| Diagnostic | 2 | 3891.947 | 1945.97347 | 35.89972 | 0 |
| Residuals | 243 | 13172.012 | 54.20581 | NA | NA |
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = Age ~ Diagnostic, data = dados_hemi_v1)
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD -4.834646 -8.854706 -0.8145856 0.0136819
## CTL-AD -11.214709 -14.895884 -7.5335333 0.0000000
## CTL-MCI -6.380063 -8.934389 -3.8257371 0.0000000
| Df | Sum Sq | Mean Sq | F value | Pr(>F) | |
|---|---|---|---|---|---|
| Diagnostic | 2 | 284.4938 | 142.246908 | 25.82509 | 0 |
| Residuals | 243 | 1338.4655 | 5.508089 | NA | NA |
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = ESC ~ Diagnostic, data = dados_hemi_v1)
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD 0.5664336 -0.7150413 1.847908 0.5509076
## CTL-AD 2.6053946 1.4319461 3.778843 0.0000011
## CTL-MCI 2.0389610 1.2247184 2.853204 0.0000000
| Df | Sum Sq | Mean Sq | F value | Pr(>F) | |
|---|---|---|---|---|---|
| Diagnostic | 2 | 345.1955 | 172.5977662 | 186.1942 | 0 |
| Residuals | 239 | 221.5476 | 0.9269774 | NA | NA |
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = COGNITIVE_INDEX ~ Diagnostic, data = dados_hemi_v1)
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD 1.874222 1.346144 2.402300 0
## CTL-AD 3.559353 3.077455 4.041252 0
## CTL-MCI 1.685132 1.346780 2.023484 0
## [1] "N sujeitos = 123"
## [1] "N sujeitos lobos = 123"
| SUBJ |
|---|
| SUBJ | Age | Diagnostic | machine | ROI | localGI |
|---|---|---|---|---|---|
| SUBJ003 | 82.58 | NA | Philips-Achieva | hemisphere | 2.57 |
| SUBJ005 | 82.87 | NA | Philips-Achieva | hemisphere | 2.51 |
| SUBJ010 | 75.55 | NA | Philips-Achieva | hemisphere | 2.59 |
| SUBJ018 | 75.85 | CCL A MD+PARKINSON | Philips-Achieva | hemisphere | 2.50 |
| SUBJ053 | 66.34 | NA | Philips-Achieva | hemisphere | 2.61 |
| SUBJ074 | 78.95 | ESCOLHER | Philips-Achieva | hemisphere | 2.57 |
| SUBJ079 | 65.71 | NA | Philips-Achieva | hemisphere | 2.55 |
| SUBJ081 | 76.92 | NA | Philips-Achieva | hemisphere | 2.57 |
| SUBJ082 | 55.60 | NA | Philips-Achieva | hemisphere | 2.59 |
| SUBJ093 | 76.79 | CCL A DU+TBIP | Philips-Achieva | hemisphere | 2.70 |
| SUBJ100 | 81.38 | NA | Philips-Achieva | hemisphere | 2.58 |
| SUBJ154 | 67.98 | NA | Philips-Achieva | hemisphere | 2.53 |
| SUBJ155 | 64.91 | NA | Philips-Achieva | hemisphere | 2.57 |
| SUBJ157 | 79.28 | NA | Philips-Achieva | hemisphere | 2.75 |
| SUBJ166 | 76.71 | NA | Philips-Achieva | hemisphere | 2.62 |
| SUBJ174 | 76.38 | NA | Siemens-Prisma | hemisphere | 2.51 |
| SUBJ187 | 68.76 | CCL A MD +tab possivel | Philips-Achieva | hemisphere | 2.68 |
| SUBJ197 | 70.98 | NA | Philips-Achieva | hemisphere | 2.57 |
| SUBJ198 | 72.55 | NA | Philips-Achieva | hemisphere | 2.52 |
| SUBJ203 | 72.35 | NA | Philips-Achieva | hemisphere | 2.63 |
| SUBJ209 | 73.05 | D. MISTA | Philips-Achieva | hemisphere | 2.49 |
| SUBJ212 | 80.94 | NA | Philips-Achieva | hemisphere | 2.57 |
| SUBJ213 | 81.16 | NA | Philips-Achieva | hemisphere | 2.68 |
| SUBJ216 | 68.52 | NA | Philips-Achieva | hemisphere | 2.56 |
| SUBJ217 | 77.02 | NA | Philips-Achieva | hemisphere | 2.58 |
| SUBJ225 | 71.81 | NA | Philips-Achieva | hemisphere | 2.51 |
| SUBJ228 | 65.25 | NA | Philips-Achieva | hemisphere | 2.63 |
| SUBJ229 | 63.22 | NA | Philips-Achieva | hemisphere | 2.51 |
## [1] "N sujeitos = 28"
| Diagnostic | N | age | age_range | ESC | COGNITIVE_INDEX | TAU | AB1_40 | AB1_42 | Lipoxin | AvgT | AT | AE | k | K | S | I |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| AD | 13 | 77 ± 6.1 | 63 ; 86 | 13 ± 3 | -3.4 ± 1.5 | NA ± NA | NA ± NA | NA ± NA | NA ± NA | 2.4 ± 0.079 | 95000 ± 9300 | 37000 ± 3000 | 0.28 ± 0.01 | -0.55 ± 0.015 | 9.2 ± 0.13 | 10 ± 0.069 |
| MCI | 33 | 72 ± 4.6 | 62 ; 82 | 13 ± 2.4 | NA ± NA | NA ± NA | NA ± NA | NA ± NA | NA ± NA | 2.5 ± 0.085 | 97000 ± 8500 | 37000 ± 2800 | 0.29 ± 0.0096 | -0.53 ± 0.014 | 9.2 ± 0.12 | 10 ± 0.063 |
| CTL | 77 | 66 ± 8.4 | 43 ; 80 | 15 ± 2.2 | NA ± NA | NA ± NA | NA ± NA | NA ± NA | NA ± NA | 2.5 ± 0.099 | 98000 ± 7800 | 37000 ± 2400 | 0.3 ± 0.0095 | -0.52 ± 0.014 | 9.1 ± 0.1 | 10 ± 0.072 |
Todos os sujeitos
## `geom_smooth()` using formula 'y ~ x'
| Sample | term | estimate | std.error | statistic | p.value | conf.low | conf.high |
|---|---|---|---|---|---|---|---|
| IDOR | (Intercept) | 0.05 | 0.14 | 0.37 | 0.72 | -0.23 | 0.34 |
| IDOR | logExposedArea | 1.12 | 0.03 | 35.68 | 0.00 | 1.06 | 1.18 |
| Mota&Houzel2015 | (Intercept) | -0.75 | 0.02 | -30.14 | 0.00 | -0.80 | -0.70 |
| Mota&Houzel2015 | logExposedArea | 1.31 | 0.01 | 176.82 | 0.00 | 1.29 | 1.32 |
| Diagnostic | term | estimate | std.error | statistic | p.value | conf.low | conf.high |
|---|---|---|---|---|---|---|---|
| AD | (Intercept) | 0.20 | 0.39 | 0.50 | 0.62 | -0.62 | 1.01 |
| AD | logExposedArea | 1.09 | 0.09 | 12.60 | 0.00 | 0.91 | 1.27 |
| MCI | (Intercept) | 0.53 | 0.21 | 2.50 | 0.02 | 0.11 | 0.95 |
| MCI | logExposedArea | 1.02 | 0.05 | 22.08 | 0.00 | 0.93 | 1.11 |
| CTL | (Intercept) | -0.23 | 0.18 | -1.27 | 0.21 | -0.60 | 0.13 |
| CTL | logExposedArea | 1.19 | 0.04 | 29.52 | 0.00 | 1.11 | 1.27 |
## `geom_smooth()` using formula 'y ~ x'
## [1] "Verificando a diferença entre o coeficiente obtido para a Sample e o teórico de 5/4, qual o valor t deste teste? 3.65083088843401"
## [1] "Verificando a diferença entre o coeficiente obtido para a Sample e o teórico de 5/4, qual o valor p deste teste? 0.000319781905143799"
## `geom_smooth()` using formula 'y ~ x'
| Gender | Diagnostic | term | estimate | std.error | statistic | p.value | conf.low | conf.high |
|---|---|---|---|---|---|---|---|---|
| FEM | AD | (Intercept) | -0.54 | 0.91 | -0.59 | 0.56 | -2.50 | 1.42 |
| FEM | AD | logExposedArea | 1.25 | 0.20 | 6.22 | 0.00 | 0.82 | 1.68 |
| FEM | MCI | (Intercept) | 0.69 | 0.35 | 1.98 | 0.06 | -0.02 | 1.39 |
| FEM | MCI | logExposedArea | 0.98 | 0.08 | 12.90 | 0.00 | 0.83 | 1.14 |
| FEM | CTL | (Intercept) | -0.15 | 0.26 | -0.59 | 0.55 | -0.66 | 0.36 |
| FEM | CTL | logExposedArea | 1.17 | 0.06 | 20.79 | 0.00 | 1.06 | 1.28 |
| MASC | AD | (Intercept) | 0.61 | 1.03 | 0.59 | 0.57 | -1.77 | 2.99 |
| MASC | AD | logExposedArea | 1.00 | 0.22 | 4.44 | 0.00 | 0.48 | 1.51 |
| MASC | MCI | (Intercept) | 0.37 | 0.36 | 1.02 | 0.32 | -0.37 | 1.11 |
| MASC | MCI | logExposedArea | 1.05 | 0.08 | 13.41 | 0.00 | 0.89 | 1.21 |
| MASC | CTL | (Intercept) | -0.40 | 0.39 | -1.03 | 0.31 | -1.19 | 0.39 |
| MASC | CTL | logExposedArea | 1.22 | 0.09 | 14.32 | 0.00 | 1.05 | 1.40 |
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 3892 1946.0 35.9 2.19e-14 ***
## Residuals 243 13172 54.2
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## `geom_smooth()` using formula 'y ~ x'
##
## Kruskal-Wallis rank sum test
##
## data: estimate by Age_interval
## Kruskal-Wallis chi-squared = 6, df = 6, p-value = 0.4232
## Df Sum Sq Mean Sq F value Pr(>F)
## Age_interval 9 0.01417 0.0015748 8.217 1.29e-10 ***
## Residuals 236 0.04523 0.0001917
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
| Diagnostic | Age_interval | term | estimate | std.error | statistic | p.value | conf.low | conf.high |
|---|---|---|---|---|---|---|---|---|
| AD | 61-65 | (Intercept) | -1.3800344 | NaN | NaN | NaN | NaN | NaN |
| AD | 61-65 | logExposedArea | 1.4311064 | NaN | NaN | NaN | NaN | NaN |
| AD | 71-75 | (Intercept) | -3.3032245 | 1.8248956 | -1.8100895 | 0.1445322 | -8.3699470 | 1.7634980 |
| AD | 71-75 | logExposedArea | 1.8543047 | 0.3988190 | 4.6494889 | 0.0096651 | 0.7470055 | 2.9616039 |
| AD | 76-80 | (Intercept) | 0.8159310 | 0.2727576 | 2.9914148 | 0.0172966 | 0.1869509 | 1.4449111 |
| AD | 76-80 | logExposedArea | 0.9526044 | 0.0598704 | 15.9111174 | 0.0000002 | 0.8145431 | 1.0906657 |
| AD | 81-85 | (Intercept) | -0.4718700 | 0.2657462 | -1.7756417 | 0.1504466 | -1.2096998 | 0.2659597 |
| AD | 81-85 | logExposedArea | 1.2301838 | 0.0580231 | 21.2016139 | 0.0000293 | 1.0690858 | 1.3912818 |
| AD | 86-90 | (Intercept) | 2.1891660 | NaN | NaN | NaN | NaN | NaN |
| AD | 86-90 | logExposedArea | 0.6504200 | NaN | NaN | NaN | NaN | NaN |
| MCI | 61-65 | (Intercept) | 2.3131923 | NaN | NaN | NaN | NaN | NaN |
| MCI | 61-65 | logExposedArea | 0.6253010 | NaN | NaN | NaN | NaN | NaN |
| MCI | 66-70 | (Intercept) | 0.4239613 | 0.3804256 | 1.1144392 | 0.2797505 | -0.3752834 | 1.2232059 |
| MCI | 66-70 | logExposedArea | 1.0414698 | 0.0833789 | 12.4908135 | 0.0000000 | 0.8662973 | 1.2166423 |
| MCI | 71-75 | (Intercept) | 0.2764698 | 0.2965132 | 0.9324032 | 0.3604141 | -0.3355033 | 0.8884429 |
| MCI | 71-75 | logExposedArea | 1.0739725 | 0.0648748 | 16.5545434 | 0.0000000 | 0.9400776 | 1.2078675 |
| MCI | 76-80 | (Intercept) | 1.1421077 | 0.2177653 | 5.2446716 | 0.0003763 | 0.6568963 | 1.6273191 |
| MCI | 76-80 | logExposedArea | 0.8844672 | 0.0474975 | 18.6213267 | 0.0000000 | 0.7786361 | 0.9902983 |
| MCI | 81-85 | (Intercept) | 4.9796606 | 1.4682022 | 3.3916721 | 0.0274876 | 0.9032776 | 9.0560435 |
| MCI | 81-85 | logExposedArea | 0.0401461 | 0.3214635 | 0.1248854 | 0.9066391 | -0.8523796 | 0.9326718 |
| CTL | 41-45 | (Intercept) | 5.0923632 | NaN | NaN | NaN | NaN | NaN |
| CTL | 41-45 | logExposedArea | 0.0250237 | NaN | NaN | NaN | NaN | NaN |
| CTL | 46-50 | (Intercept) | -2.6579040 | 2.1494199 | -1.2365680 | 0.2838906 | -8.6256503 | 3.3098424 |
| CTL | 46-50 | logExposedArea | 1.7152907 | 0.4667264 | 3.6751524 | 0.0212945 | 0.4194504 | 3.0111309 |
| CTL | 51-55 | (Intercept) | -0.2966366 | 0.5624711 | -0.5273811 | 0.6122373 | -1.5936974 | 1.0004241 |
| CTL | 51-55 | logExposedArea | 1.2018177 | 0.1228270 | 9.7846371 | 0.0000100 | 0.9185781 | 1.4850573 |
| CTL | 56-60 | (Intercept) | -2.0224427 | 0.9449449 | -2.1402758 | 0.0535647 | -4.0813008 | 0.0364154 |
| CTL | 56-60 | logExposedArea | 1.5767768 | 0.2058965 | 7.6581049 | 0.0000059 | 1.1281669 | 2.0253867 |
| CTL | 61-65 | (Intercept) | 0.5630934 | 0.3606200 | 1.5614589 | 0.1289040 | -0.1733910 | 1.2995777 |
| CTL | 61-65 | logExposedArea | 1.0126912 | 0.0788068 | 12.8502964 | 0.0000000 | 0.8517462 | 1.1736363 |
| CTL | 66-70 | (Intercept) | 0.2835556 | 0.3763524 | 0.7534311 | 0.4563769 | -0.4812846 | 1.0483958 |
| CTL | 66-70 | logExposedArea | 1.0729497 | 0.0822893 | 13.0387461 | 0.0000000 | 0.9057177 | 1.2401817 |
| CTL | 71-75 | (Intercept) | -0.1762787 | 0.2862784 | -0.6157599 | 0.5424083 | -0.7594088 | 0.4068513 |
| CTL | 71-75 | logExposedArea | 1.1726165 | 0.0627506 | 18.6869462 | 0.0000000 | 1.0447978 | 1.3004353 |
| CTL | 76-80 | (Intercept) | 1.3743693 | 0.8018556 | 1.7139859 | 0.1058343 | -0.3254887 | 3.0742273 |
| CTL | 76-80 | logExposedArea | 0.8327395 | 0.1758673 | 4.7350436 | 0.0002242 | 0.4599174 | 1.2055616 |
| CTL | 81-85 | (Intercept) | 3.4850432 | NaN | NaN | NaN | NaN | NaN |
| CTL | 81-85 | logExposedArea | 0.3737067 | NaN | NaN | NaN | NaN | NaN |
## `summarise()` has grouped output by 'Diagnostic'. You can override using the `.groups` argument.
## `geom_smooth()` using formula 'y ~ x'
Brain volume:
## `geom_smooth()` using formula 'y ~ x'
##
## Pearson's product-moment correlation
##
## data: filter(dados_hemi_v1, Diagnostic == "CTL")$GMvolume and filter(dados_hemi_v1, Diagnostic == "CTL")$K
## t = 4.462, df = 152, p-value = 1.572e-05
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.1925173 0.4730166
## sample estimates:
## cor
## 0.3403158
K:
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
##
## Pearson's product-moment correlation
##
## data: filter(dados_hemi_v1, Diagnostic == "CTL")$Age and filter(dados_hemi_v1, Diagnostic == "CTL")$K
## t = -4.176, df = 152, p-value = 4.981e-05
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.4558437 -0.1713459
## sample estimates:
## cor
## -0.3208125
I:
## `geom_smooth()` using formula 'y ~ x'
| Diagnostic | r.squared | adj.r.squared | sigma | statistic | p.value | df | logLik | AIC | BIC | deviance | df.residual | nobs |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| AD | 0.0064888 | -0.0349075 | 0.0700630 | 0.1567492 | 0.6956650 | 1 | 33.26553 | -60.53106 | -56.75677 | 0.1178117 | 24 | 26 |
| MCI | 0.0015591 | -0.0140415 | 0.0634854 | 0.0999409 | 0.7529293 | 1 | 89.32396 | -172.64792 | -166.07895 | 0.2579450 | 64 | 66 |
| CTL | 0.2236800 | 0.2185726 | 0.0637780 | 43.7955460 | 0.0000000 | 1 | 206.35135 | -406.70271 | -397.59185 | 0.6182810 | 152 | 154 |
##
## Pearson's product-moment correlation
##
## data: filter(dados_hemi_v1, Diagnostic == "CTL")$Age and filter(dados_hemi_v1, Diagnostic == "CTL")$I
## t = -6.6178, df = 152, p-value = 5.879e-10
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.5871863 -0.3402379
## sample estimates:
## cor
## -0.4729482
## Df Sum Sq Mean Sq F value Pr(>F)
## Age_interval 9 0.2825 0.031391 7.415 1.59e-09 ***
## Residuals 236 0.9991 0.004234
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## `geom_smooth()` using formula 'y ~ x'
S:
## `geom_smooth()` using formula 'y ~ x'
| Diagnostic | r.squared | adj.r.squared | sigma | statistic | p.value | df | logLik | AIC | BIC | deviance | df.residual | nobs |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| AD | 0.0010166 | -0.0406077 | 0.1311617 | 0.0244241 | 0.8771178 | 1 | 16.96259 | -27.92518 | -24.15089 | 0.4128813 | 24 | 26 |
| MCI | 0.1478839 | 0.1345696 | 0.1159341 | 11.1071374 | 0.0014325 | 1 | 49.57790 | -93.15580 | -86.58683 | 0.8602065 | 64 | 66 |
| CTL | 0.0154817 | 0.0090046 | 0.1004618 | 2.3902182 | 0.1241761 | 1 | 136.37855 | -266.75711 | -257.64625 | 1.5340718 | 152 | 154 |
##
## Pearson's product-moment correlation
##
## data: filter(dados_hemi_v1, Diagnostic == "CTL")$Age and filter(dados_hemi_v1, Diagnostic == "CTL")$S
## t = 1.546, df = 152, p-value = 0.1242
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.03441253 0.27713228
## sample estimates:
## cor
## 0.1244254
## Df Sum Sq Mean Sq F value Pr(>F)
## Age_interval 9 0.4051 0.04501 3.892 0.000127 ***
## Residuals 236 2.7298 0.01157
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
##
## Call:
## lm(formula = 1/2 * logAvgThickness_age_decay + logTotalArea_age_decay ~
## logExposedArea_age_decay, data = dados_hemi_v1, na.action = na.omit)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.038356 -0.009132 0.000975 0.009838 0.032061
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.2819 0.1348 2.091 0.0376 *
## logExposedArea_age_decay 1.0789 0.0292 36.953 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.01346 on 244 degrees of freedom
## Multiple R-squared: 0.8484, Adjusted R-squared: 0.8478
## F-statistic: 1366 on 1 and 244 DF, p-value: < 2.2e-16
| Diagnostic | term | estimate | std.error | statistic | p.value | conf.low | conf.high |
|---|---|---|---|---|---|---|---|
| AD | (Intercept) | 0.24 | 0.38 | 0.63 | 0.54 | -0.54 | 1.02 |
| AD | logExposedArea_age_decay | 1.09 | 0.08 | 13.31 | 0.00 | 0.92 | 1.25 |
| MCI | (Intercept) | 0.46 | 0.20 | 2.29 | 0.03 | 0.06 | 0.87 |
| MCI | logExposedArea_age_decay | 1.04 | 0.04 | 23.65 | 0.00 | 0.95 | 1.13 |
| CTL | (Intercept) | 0.05 | 0.18 | 0.30 | 0.76 | -0.30 | 0.41 |
| CTL | logExposedArea_age_decay | 1.13 | 0.04 | 29.02 | 0.00 | 1.05 | 1.21 |
##
## Kruskal-Wallis rank sum test
##
## data: estimate by Diagnostic
## Kruskal-Wallis chi-squared = 2, df = 2, p-value = 0.3679
| Diagnostic | term | estimate | std.error | statistic | p.value | conf.low | conf.high |
|---|---|---|---|---|---|---|---|
| AD | (Intercept) | 0.20 | 0.39 | 0.50 | 0.62 | -0.62 | 1.01 |
| AD | logExposedArea | 1.09 | 0.09 | 12.60 | 0.00 | 0.91 | 1.27 |
| MCI | (Intercept) | 0.53 | 0.21 | 2.50 | 0.02 | 0.11 | 0.95 |
| MCI | logExposedArea | 1.02 | 0.05 | 22.08 | 0.00 | 0.93 | 1.11 |
| CTL | (Intercept) | -0.23 | 0.18 | -1.27 | 0.21 | -0.60 | 0.13 |
| CTL | logExposedArea | 1.19 | 0.04 | 29.52 | 0.00 | 1.11 | 1.27 |
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.058 0.0292 81.596 <2e-16 ***
## ROI 4 6.222 1.5556 4347.166 <2e-16 ***
## Diagnostic:ROI 8 0.003 0.0004 1.013 0.424
## Residuals 1207 0.432 0.0004
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.1539 0.07697 6.275 0.0022 **
## Residuals 243 2.9810 0.01227
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = S ~ Diagnostic, data = dados_hemi_v1)
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD -0.04832085 -0.10879738 0.012155678 0.1454395
## CTL-AD -0.07844671 -0.13382516 -0.023068259 0.0027747
## CTL-MCI -0.03012586 -0.06855234 0.008300618 0.1561058
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.1083 0.05415 11.21 2.2e-05 ***
## Residuals 243 1.1734 0.00483
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = I ~ Diagnostic, data = dados_hemi_v1)
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD 0.04158669 0.003644613 0.07952877 0.0277853
## CTL-AD 0.06616956 0.031425940 0.10091318 0.0000325
## CTL-MCI 0.02458287 0.000474665 0.04869107 0.0445049
Is it easier to differentiate between diagnostic groups in younger subjects?
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.00951 0.004754 24.206 3.90e-10 ***
## Age_interval 5 0.01008 0.002016 10.266 9.08e-09 ***
## Diagnostic:Age_interval 7 0.00581 0.000830 4.224 0.000228 ***
## Residuals 199 0.03909 0.000196
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## diff lwr upr p adj
## MCI:61-65-AD:61-65 0.0247295988 -0.024817696 0.0742768932 9.504734e-01
## CTL:61-65-AD:61-65 0.0440618281 0.007948342 0.0801753144 3.252613e-03
## AD:66-70-AD:61-65 NA NA NA NA
## MCI:66-70-AD:61-65 0.0392822521 0.002536995 0.0760275091 2.283589e-02
## CTL:66-70-AD:61-65 0.0430458757 0.007050601 0.0790411506 4.534547e-03
## AD:71-75-AD:61-65 0.0202524212 -0.020202775 0.0607076177 9.491546e-01
## MCI:71-75-AD:61-65 0.0308792707 -0.005478504 0.0672370450 2.092742e-01
## CTL:71-75-AD:61-65 0.0228814145 -0.013169538 0.0589323666 7.217380e-01
## AD:76-80-AD:61-65 0.0179841586 -0.020395011 0.0563633279 9.721875e-01
## MCI:76-80-AD:61-65 0.0226882735 -0.015154098 0.0605306447 8.015808e-01
## CTL:76-80-AD:61-65 0.0332570905 -0.003673282 0.0701874633 1.347703e-01
## AD:81-85-AD:61-65 0.0183866342 -0.022068562 0.0588418308 9.793185e-01
## MCI:81-85-AD:61-65 0.0117412129 -0.028713984 0.0521964094 9.999045e-01
## CTL:81-85-AD:61-65 0.0383324981 -0.011214796 0.0878797926 3.636968e-01
## AD:86-90-AD:61-65 -0.0052373329 -0.054784627 0.0443099615 1.000000e+00
## MCI:86-90-AD:61-65 NA NA NA NA
## CTL:86-90-AD:61-65 NA NA NA NA
## CTL:61-65-MCI:61-65 0.0193322293 -0.016781257 0.0554457157 9.113170e-01
## AD:66-70-MCI:61-65 NA NA NA NA
## MCI:66-70-MCI:61-65 0.0145526533 -0.022192604 0.0512979103 9.952006e-01
## CTL:66-70-MCI:61-65 0.0183162769 -0.017678998 0.0543115518 9.414707e-01
## AD:71-75-MCI:61-65 -0.0044771776 -0.044932374 0.0359780189 1.000000e+00
## MCI:71-75-MCI:61-65 0.0061496720 -0.030208102 0.0425074462 1.000000e+00
## CTL:71-75-MCI:61-65 -0.0018481843 -0.037899136 0.0342027678 1.000000e+00
## AD:76-80-MCI:61-65 -0.0067454401 -0.045124609 0.0316337291 9.999999e-01
## MCI:76-80-MCI:61-65 -0.0020413252 -0.039883696 0.0358010460 1.000000e+00
## CTL:76-80-MCI:61-65 0.0085274917 -0.028402881 0.0454578645 9.999964e-01
## AD:81-85-MCI:61-65 -0.0063429645 -0.046798161 0.0341122320 1.000000e+00
## MCI:81-85-MCI:61-65 -0.0129883859 -0.053443582 0.0274668106 9.996311e-01
## CTL:81-85-MCI:61-65 0.0136028994 -0.035944395 0.0631501938 9.999558e-01
## AD:86-90-MCI:61-65 -0.0299669317 -0.079514226 0.0195803628 7.903899e-01
## MCI:86-90-MCI:61-65 NA NA NA NA
## CTL:86-90-MCI:61-65 NA NA NA NA
## AD:66-70-CTL:61-65 NA NA NA NA
## MCI:66-70-CTL:61-65 -0.0047795760 -0.018902728 0.0093435759 9.992738e-01
## CTL:66-70-CTL:61-65 -0.0010159524 -0.013053781 0.0110218764 1.000000e+00
## AD:71-75-CTL:61-65 -0.0238094069 -0.045851921 -0.0017668927 1.999484e-02
## MCI:71-75-CTL:61-65 -0.0131825574 -0.026264501 -0.0001006136 4.598849e-02
## CTL:71-75-CTL:61-65 -0.0211804136 -0.033383719 -0.0089771086 6.768296e-07
## AD:76-80-CTL:61-65 -0.0260776695 -0.044027882 -0.0081274567 9.597816e-05
## MCI:76-80-CTL:61-65 -0.0213735546 -0.038145393 -0.0046017161 1.506187e-03
## CTL:76-80-CTL:61-65 -0.0108047376 -0.025402749 0.0037932740 4.465391e-01
## AD:81-85-CTL:61-65 -0.0256751939 -0.047717708 -0.0036326797 6.871740e-03
## MCI:81-85-CTL:61-65 -0.0323206152 -0.054363129 -0.0102781010 7.693326e-05
## CTL:81-85-CTL:61-65 -0.0057293300 -0.041842816 0.0303841563 1.000000e+00
## AD:86-90-CTL:61-65 -0.0492991610 -0.085412647 -0.0131856747 3.865864e-04
## MCI:86-90-CTL:61-65 NA NA NA NA
## CTL:86-90-CTL:61-65 NA NA NA NA
## MCI:66-70-AD:66-70 NA NA NA NA
## CTL:66-70-AD:66-70 NA NA NA NA
## AD:71-75-AD:66-70 NA NA NA NA
## MCI:71-75-AD:66-70 NA NA NA NA
## CTL:71-75-AD:66-70 NA NA NA NA
## AD:76-80-AD:66-70 NA NA NA NA
## MCI:76-80-AD:66-70 NA NA NA NA
## CTL:76-80-AD:66-70 NA NA NA NA
## AD:81-85-AD:66-70 NA NA NA NA
## MCI:81-85-AD:66-70 NA NA NA NA
## CTL:81-85-AD:66-70 NA NA NA NA
## AD:86-90-AD:66-70 NA NA NA NA
## MCI:86-90-AD:66-70 NA NA NA NA
## CTL:86-90-AD:66-70 NA NA NA NA
## CTL:66-70-MCI:66-70 0.0037636236 -0.010054457 0.0175817038 9.999605e-01
## AD:71-75-MCI:66-70 -0.0190298309 -0.042092841 0.0040331795 2.530061e-01
## MCI:71-75-MCI:66-70 -0.0084029813 -0.023139578 0.0063336150 8.580697e-01
## CTL:71-75-MCI:66-70 -0.0164008376 -0.030363311 -0.0024383639 6.032595e-03
## AD:76-80-MCI:66-70 -0.0212980935 -0.040487678 -0.0021085088 1.389610e-02
## MCI:76-80-MCI:66-70 -0.0165939786 -0.034686092 0.0014981353 1.155479e-01
## CTL:76-80-MCI:66-70 -0.0060251616 -0.022122738 0.0100724147 9.975039e-01
## AD:81-85-MCI:66-70 -0.0208956178 -0.043958628 0.0021673926 1.281810e-01
## MCI:81-85-MCI:66-70 -0.0275410392 -0.050604050 -0.0044780288 4.640981e-03
## CTL:81-85-MCI:66-70 -0.0009497540 -0.037695011 0.0357955030 1.000000e+00
## AD:86-90-MCI:66-70 -0.0445195850 -0.081264842 -0.0077743280 3.658894e-03
## MCI:86-90-MCI:66-70 NA NA NA NA
## CTL:86-90-MCI:66-70 NA NA NA NA
## AD:71-75-CTL:66-70 -0.0227934545 -0.044641758 -0.0009451513 3.086157e-02
## MCI:71-75-CTL:66-70 -0.0121666049 -0.024918592 0.0005853822 8.071838e-02
## CTL:71-75-CTL:66-70 -0.0201644612 -0.032013367 -0.0083155552 1.265567e-06
## AD:76-80-CTL:66-70 -0.0250617171 -0.042772902 -0.0073505324 1.764978e-04
## MCI:76-80-CTL:66-70 -0.0203576022 -0.036873367 -0.0038418373 2.731578e-03
## CTL:76-80-CTL:66-70 -0.0097887852 -0.024091857 0.0045142867 5.940247e-01
## AD:81-85-CTL:66-70 -0.0246592414 -0.046507545 -0.0028109382 1.097228e-02
## MCI:81-85-CTL:66-70 -0.0313046628 -0.053152966 -0.0094563596 1.326660e-04
## CTL:81-85-CTL:66-70 -0.0047133776 -0.040708652 0.0312818973 1.000000e+00
## AD:86-90-CTL:66-70 -0.0482832086 -0.084278484 -0.0122879337 5.564458e-04
## MCI:86-90-CTL:66-70 NA NA NA NA
## CTL:86-90-CTL:66-70 NA NA NA NA
## MCI:71-75-AD:71-75 0.0106268496 -0.011813656 0.0330673550 9.692717e-01
## CTL:71-75-AD:71-75 0.0026289933 -0.019310917 0.0245689041 1.000000e+00
## AD:76-80-AD:71-75 -0.0022682626 -0.027854375 0.0233178503 1.000000e+00
## MCI:76-80-AD:71-75 0.0024358523 -0.022337795 0.0272094996 1.000000e+00
## CTL:76-80-AD:71-75 0.0130046693 -0.010352149 0.0363614879 8.804043e-01
## AD:81-85-AD:71-75 -0.0018657869 -0.030471931 0.0267403568 1.000000e+00
## MCI:81-85-AD:71-75 -0.0085112083 -0.037117352 0.0200949355 9.998660e-01
## CTL:81-85-AD:71-75 0.0180800769 -0.022375120 0.0585352734 9.825094e-01
## AD:86-90-AD:71-75 -0.0254897541 -0.065944951 0.0149654424 7.328777e-01
## MCI:86-90-AD:71-75 NA NA NA NA
## CTL:86-90-AD:71-75 NA NA NA NA
## CTL:71-75-MCI:71-75 -0.0079978563 -0.020906168 0.0049104552 7.574760e-01
## AD:76-80-MCI:71-75 -0.0128951121 -0.031331869 0.0055416444 5.538704e-01
## MCI:76-80-MCI:71-75 -0.0081909972 -0.025482568 0.0091005735 9.691848e-01
## CTL:76-80-MCI:71-75 0.0023778197 -0.012814474 0.0175701137 1.000000e+00
## AD:81-85-MCI:71-75 -0.0124926365 -0.034933142 0.0099478689 8.805340e-01
## MCI:81-85-MCI:71-75 -0.0191380579 -0.041578563 0.0033024476 2.033763e-01
## CTL:81-85-MCI:71-75 0.0074532274 -0.028904547 0.0438110016 9.999994e-01
## AD:86-90-MCI:71-75 -0.0361166036 -0.072474378 0.0002411706 5.369859e-02
## MCI:86-90-MCI:71-75 NA NA NA NA
## CTL:86-90-MCI:71-75 NA NA NA NA
## AD:76-80-CTL:71-75 -0.0048972559 -0.022721324 0.0129268119 9.999553e-01
## MCI:76-80-CTL:71-75 -0.0001931410 -0.016829902 0.0164436201 1.000000e+00
## CTL:76-80-CTL:71-75 0.0103756760 -0.004066941 0.0248182933 5.033192e-01
## AD:81-85-CTL:71-75 -0.0044947802 -0.026434691 0.0174451305 9.999994e-01
## MCI:81-85-CTL:71-75 -0.0111402016 -0.033080112 0.0107997092 9.425345e-01
## CTL:81-85-CTL:71-75 0.0154510836 -0.020599868 0.0515020357 9.886314e-01
## AD:86-90-CTL:71-75 -0.0281187474 -0.064169699 0.0079322047 3.489203e-01
## MCI:86-90-CTL:71-75 NA NA NA NA
## CTL:86-90-CTL:71-75 NA NA NA NA
## MCI:76-80-AD:76-80 0.0047041149 -0.016510769 0.0259189989 9.999980e-01
## CTL:76-80-AD:76-80 0.0152729318 -0.004268785 0.0348146483 3.452659e-01
## AD:81-85-AD:76-80 0.0004024756 -0.025183637 0.0259885885 1.000000e+00
## MCI:81-85-AD:76-80 -0.0062429457 -0.031829059 0.0193431671 9.999918e-01
## CTL:81-85-AD:76-80 0.0203483395 -0.018030830 0.0587275087 9.178325e-01
## AD:86-90-AD:76-80 -0.0232214915 -0.061600661 0.0151576777 7.898732e-01
## MCI:86-90-AD:76-80 NA NA NA NA
## CTL:86-90-AD:76-80 NA NA NA NA
## CTL:76-80-MCI:76-80 0.0105688169 -0.007896369 0.0290340034 8.542780e-01
## AD:81-85-MCI:76-80 -0.0043016393 -0.029075287 0.0204720080 1.000000e+00
## MCI:81-85-MCI:76-80 -0.0109470606 -0.035720708 0.0138265866 9.844039e-01
## CTL:81-85-MCI:76-80 0.0156442246 -0.022198147 0.0534865958 9.922725e-01
## AD:86-90-MCI:76-80 -0.0279256064 -0.065767978 0.0099167648 4.522147e-01
## MCI:86-90-MCI:76-80 NA NA NA NA
## CTL:86-90-MCI:76-80 NA NA NA NA
## AD:81-85-CTL:76-80 -0.0148704562 -0.038227275 0.0084863624 7.169454e-01
## MCI:81-85-CTL:76-80 -0.0215158776 -0.044872696 0.0018409410 1.112848e-01
## CTL:81-85-CTL:76-80 0.0050754077 -0.031854965 0.0420057805 1.000000e+00
## AD:86-90-CTL:76-80 -0.0384944234 -0.075424796 -0.0015640505 3.118513e-02
## MCI:86-90-CTL:76-80 NA NA NA NA
## CTL:86-90-CTL:76-80 NA NA NA NA
## MCI:81-85-AD:81-85 -0.0066454214 -0.035251565 0.0219607224 9.999960e-01
## CTL:81-85-AD:81-85 0.0199458639 -0.020509333 0.0604010604 9.555534e-01
## AD:86-90-AD:81-85 -0.0236239671 -0.064079164 0.0168312294 8.328823e-01
## MCI:86-90-AD:81-85 NA NA NA NA
## CTL:86-90-AD:81-85 NA NA NA NA
## CTL:81-85-MCI:81-85 0.0265912852 -0.013863911 0.0670464818 6.651102e-01
## AD:86-90-MCI:81-85 -0.0169785458 -0.057433742 0.0234766507 9.909039e-01
## MCI:86-90-MCI:81-85 NA NA NA NA
## CTL:86-90-MCI:81-85 NA NA NA NA
## AD:86-90-CTL:81-85 -0.0435698310 -0.093117125 0.0059774634 1.626630e-01
## MCI:86-90-CTL:81-85 NA NA NA NA
## CTL:86-90-CTL:81-85 NA NA NA NA
## MCI:86-90-AD:86-90 NA NA NA NA
## CTL:86-90-AD:86-90 NA NA NA NA
## CTL:86-90-MCI:86-90 NA NA NA NA
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.00951 0.004754 24.206 3.90e-10 ***
## Age_interval 5 0.01008 0.002016 10.266 9.08e-09 ***
## Diagnostic:Age_interval 7 0.00581 0.000830 4.224 0.000228 ***
## Residuals 199 0.03909 0.000196
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## diff lwr upr p adj
## MCI:61-65-AD:61-65 0.0247295988 -0.024817696 0.0742768932 9.504734e-01
## CTL:61-65-AD:61-65 0.0440618281 0.007948342 0.0801753144 3.252613e-03
## AD:66-70-AD:61-65 NA NA NA NA
## MCI:66-70-AD:61-65 0.0392822521 0.002536995 0.0760275091 2.283589e-02
## CTL:66-70-AD:61-65 0.0430458757 0.007050601 0.0790411506 4.534547e-03
## AD:71-75-AD:61-65 0.0202524212 -0.020202775 0.0607076177 9.491546e-01
## MCI:71-75-AD:61-65 0.0308792707 -0.005478504 0.0672370450 2.092742e-01
## CTL:71-75-AD:61-65 0.0228814145 -0.013169538 0.0589323666 7.217380e-01
## AD:76-80-AD:61-65 0.0179841586 -0.020395011 0.0563633279 9.721875e-01
## MCI:76-80-AD:61-65 0.0226882735 -0.015154098 0.0605306447 8.015808e-01
## CTL:76-80-AD:61-65 0.0332570905 -0.003673282 0.0701874633 1.347703e-01
## AD:81-85-AD:61-65 0.0183866342 -0.022068562 0.0588418308 9.793185e-01
## MCI:81-85-AD:61-65 0.0117412129 -0.028713984 0.0521964094 9.999045e-01
## CTL:81-85-AD:61-65 0.0383324981 -0.011214796 0.0878797926 3.636968e-01
## AD:86-90-AD:61-65 -0.0052373329 -0.054784627 0.0443099615 1.000000e+00
## MCI:86-90-AD:61-65 NA NA NA NA
## CTL:86-90-AD:61-65 NA NA NA NA
## CTL:61-65-MCI:61-65 0.0193322293 -0.016781257 0.0554457157 9.113170e-01
## AD:66-70-MCI:61-65 NA NA NA NA
## MCI:66-70-MCI:61-65 0.0145526533 -0.022192604 0.0512979103 9.952006e-01
## CTL:66-70-MCI:61-65 0.0183162769 -0.017678998 0.0543115518 9.414707e-01
## AD:71-75-MCI:61-65 -0.0044771776 -0.044932374 0.0359780189 1.000000e+00
## MCI:71-75-MCI:61-65 0.0061496720 -0.030208102 0.0425074462 1.000000e+00
## CTL:71-75-MCI:61-65 -0.0018481843 -0.037899136 0.0342027678 1.000000e+00
## AD:76-80-MCI:61-65 -0.0067454401 -0.045124609 0.0316337291 9.999999e-01
## MCI:76-80-MCI:61-65 -0.0020413252 -0.039883696 0.0358010460 1.000000e+00
## CTL:76-80-MCI:61-65 0.0085274917 -0.028402881 0.0454578645 9.999964e-01
## AD:81-85-MCI:61-65 -0.0063429645 -0.046798161 0.0341122320 1.000000e+00
## MCI:81-85-MCI:61-65 -0.0129883859 -0.053443582 0.0274668106 9.996311e-01
## CTL:81-85-MCI:61-65 0.0136028994 -0.035944395 0.0631501938 9.999558e-01
## AD:86-90-MCI:61-65 -0.0299669317 -0.079514226 0.0195803628 7.903899e-01
## MCI:86-90-MCI:61-65 NA NA NA NA
## CTL:86-90-MCI:61-65 NA NA NA NA
## AD:66-70-CTL:61-65 NA NA NA NA
## MCI:66-70-CTL:61-65 -0.0047795760 -0.018902728 0.0093435759 9.992738e-01
## CTL:66-70-CTL:61-65 -0.0010159524 -0.013053781 0.0110218764 1.000000e+00
## AD:71-75-CTL:61-65 -0.0238094069 -0.045851921 -0.0017668927 1.999484e-02
## MCI:71-75-CTL:61-65 -0.0131825574 -0.026264501 -0.0001006136 4.598849e-02
## CTL:71-75-CTL:61-65 -0.0211804136 -0.033383719 -0.0089771086 6.768296e-07
## AD:76-80-CTL:61-65 -0.0260776695 -0.044027882 -0.0081274567 9.597816e-05
## MCI:76-80-CTL:61-65 -0.0213735546 -0.038145393 -0.0046017161 1.506187e-03
## CTL:76-80-CTL:61-65 -0.0108047376 -0.025402749 0.0037932740 4.465391e-01
## AD:81-85-CTL:61-65 -0.0256751939 -0.047717708 -0.0036326797 6.871740e-03
## MCI:81-85-CTL:61-65 -0.0323206152 -0.054363129 -0.0102781010 7.693326e-05
## CTL:81-85-CTL:61-65 -0.0057293300 -0.041842816 0.0303841563 1.000000e+00
## AD:86-90-CTL:61-65 -0.0492991610 -0.085412647 -0.0131856747 3.865864e-04
## MCI:86-90-CTL:61-65 NA NA NA NA
## CTL:86-90-CTL:61-65 NA NA NA NA
## MCI:66-70-AD:66-70 NA NA NA NA
## CTL:66-70-AD:66-70 NA NA NA NA
## AD:71-75-AD:66-70 NA NA NA NA
## MCI:71-75-AD:66-70 NA NA NA NA
## CTL:71-75-AD:66-70 NA NA NA NA
## AD:76-80-AD:66-70 NA NA NA NA
## MCI:76-80-AD:66-70 NA NA NA NA
## CTL:76-80-AD:66-70 NA NA NA NA
## AD:81-85-AD:66-70 NA NA NA NA
## MCI:81-85-AD:66-70 NA NA NA NA
## CTL:81-85-AD:66-70 NA NA NA NA
## AD:86-90-AD:66-70 NA NA NA NA
## MCI:86-90-AD:66-70 NA NA NA NA
## CTL:86-90-AD:66-70 NA NA NA NA
## CTL:66-70-MCI:66-70 0.0037636236 -0.010054457 0.0175817038 9.999605e-01
## AD:71-75-MCI:66-70 -0.0190298309 -0.042092841 0.0040331795 2.530061e-01
## MCI:71-75-MCI:66-70 -0.0084029813 -0.023139578 0.0063336150 8.580697e-01
## CTL:71-75-MCI:66-70 -0.0164008376 -0.030363311 -0.0024383639 6.032595e-03
## AD:76-80-MCI:66-70 -0.0212980935 -0.040487678 -0.0021085088 1.389610e-02
## MCI:76-80-MCI:66-70 -0.0165939786 -0.034686092 0.0014981353 1.155479e-01
## CTL:76-80-MCI:66-70 -0.0060251616 -0.022122738 0.0100724147 9.975039e-01
## AD:81-85-MCI:66-70 -0.0208956178 -0.043958628 0.0021673926 1.281810e-01
## MCI:81-85-MCI:66-70 -0.0275410392 -0.050604050 -0.0044780288 4.640981e-03
## CTL:81-85-MCI:66-70 -0.0009497540 -0.037695011 0.0357955030 1.000000e+00
## AD:86-90-MCI:66-70 -0.0445195850 -0.081264842 -0.0077743280 3.658894e-03
## MCI:86-90-MCI:66-70 NA NA NA NA
## CTL:86-90-MCI:66-70 NA NA NA NA
## AD:71-75-CTL:66-70 -0.0227934545 -0.044641758 -0.0009451513 3.086157e-02
## MCI:71-75-CTL:66-70 -0.0121666049 -0.024918592 0.0005853822 8.071838e-02
## CTL:71-75-CTL:66-70 -0.0201644612 -0.032013367 -0.0083155552 1.265567e-06
## AD:76-80-CTL:66-70 -0.0250617171 -0.042772902 -0.0073505324 1.764978e-04
## MCI:76-80-CTL:66-70 -0.0203576022 -0.036873367 -0.0038418373 2.731578e-03
## CTL:76-80-CTL:66-70 -0.0097887852 -0.024091857 0.0045142867 5.940247e-01
## AD:81-85-CTL:66-70 -0.0246592414 -0.046507545 -0.0028109382 1.097228e-02
## MCI:81-85-CTL:66-70 -0.0313046628 -0.053152966 -0.0094563596 1.326660e-04
## CTL:81-85-CTL:66-70 -0.0047133776 -0.040708652 0.0312818973 1.000000e+00
## AD:86-90-CTL:66-70 -0.0482832086 -0.084278484 -0.0122879337 5.564458e-04
## MCI:86-90-CTL:66-70 NA NA NA NA
## CTL:86-90-CTL:66-70 NA NA NA NA
## MCI:71-75-AD:71-75 0.0106268496 -0.011813656 0.0330673550 9.692717e-01
## CTL:71-75-AD:71-75 0.0026289933 -0.019310917 0.0245689041 1.000000e+00
## AD:76-80-AD:71-75 -0.0022682626 -0.027854375 0.0233178503 1.000000e+00
## MCI:76-80-AD:71-75 0.0024358523 -0.022337795 0.0272094996 1.000000e+00
## CTL:76-80-AD:71-75 0.0130046693 -0.010352149 0.0363614879 8.804043e-01
## AD:81-85-AD:71-75 -0.0018657869 -0.030471931 0.0267403568 1.000000e+00
## MCI:81-85-AD:71-75 -0.0085112083 -0.037117352 0.0200949355 9.998660e-01
## CTL:81-85-AD:71-75 0.0180800769 -0.022375120 0.0585352734 9.825094e-01
## AD:86-90-AD:71-75 -0.0254897541 -0.065944951 0.0149654424 7.328777e-01
## MCI:86-90-AD:71-75 NA NA NA NA
## CTL:86-90-AD:71-75 NA NA NA NA
## CTL:71-75-MCI:71-75 -0.0079978563 -0.020906168 0.0049104552 7.574760e-01
## AD:76-80-MCI:71-75 -0.0128951121 -0.031331869 0.0055416444 5.538704e-01
## MCI:76-80-MCI:71-75 -0.0081909972 -0.025482568 0.0091005735 9.691848e-01
## CTL:76-80-MCI:71-75 0.0023778197 -0.012814474 0.0175701137 1.000000e+00
## AD:81-85-MCI:71-75 -0.0124926365 -0.034933142 0.0099478689 8.805340e-01
## MCI:81-85-MCI:71-75 -0.0191380579 -0.041578563 0.0033024476 2.033763e-01
## CTL:81-85-MCI:71-75 0.0074532274 -0.028904547 0.0438110016 9.999994e-01
## AD:86-90-MCI:71-75 -0.0361166036 -0.072474378 0.0002411706 5.369859e-02
## MCI:86-90-MCI:71-75 NA NA NA NA
## CTL:86-90-MCI:71-75 NA NA NA NA
## AD:76-80-CTL:71-75 -0.0048972559 -0.022721324 0.0129268119 9.999553e-01
## MCI:76-80-CTL:71-75 -0.0001931410 -0.016829902 0.0164436201 1.000000e+00
## CTL:76-80-CTL:71-75 0.0103756760 -0.004066941 0.0248182933 5.033192e-01
## AD:81-85-CTL:71-75 -0.0044947802 -0.026434691 0.0174451305 9.999994e-01
## MCI:81-85-CTL:71-75 -0.0111402016 -0.033080112 0.0107997092 9.425345e-01
## CTL:81-85-CTL:71-75 0.0154510836 -0.020599868 0.0515020357 9.886314e-01
## AD:86-90-CTL:71-75 -0.0281187474 -0.064169699 0.0079322047 3.489203e-01
## MCI:86-90-CTL:71-75 NA NA NA NA
## CTL:86-90-CTL:71-75 NA NA NA NA
## MCI:76-80-AD:76-80 0.0047041149 -0.016510769 0.0259189989 9.999980e-01
## CTL:76-80-AD:76-80 0.0152729318 -0.004268785 0.0348146483 3.452659e-01
## AD:81-85-AD:76-80 0.0004024756 -0.025183637 0.0259885885 1.000000e+00
## MCI:81-85-AD:76-80 -0.0062429457 -0.031829059 0.0193431671 9.999918e-01
## CTL:81-85-AD:76-80 0.0203483395 -0.018030830 0.0587275087 9.178325e-01
## AD:86-90-AD:76-80 -0.0232214915 -0.061600661 0.0151576777 7.898732e-01
## MCI:86-90-AD:76-80 NA NA NA NA
## CTL:86-90-AD:76-80 NA NA NA NA
## CTL:76-80-MCI:76-80 0.0105688169 -0.007896369 0.0290340034 8.542780e-01
## AD:81-85-MCI:76-80 -0.0043016393 -0.029075287 0.0204720080 1.000000e+00
## MCI:81-85-MCI:76-80 -0.0109470606 -0.035720708 0.0138265866 9.844039e-01
## CTL:81-85-MCI:76-80 0.0156442246 -0.022198147 0.0534865958 9.922725e-01
## AD:86-90-MCI:76-80 -0.0279256064 -0.065767978 0.0099167648 4.522147e-01
## MCI:86-90-MCI:76-80 NA NA NA NA
## CTL:86-90-MCI:76-80 NA NA NA NA
## AD:81-85-CTL:76-80 -0.0148704562 -0.038227275 0.0084863624 7.169454e-01
## MCI:81-85-CTL:76-80 -0.0215158776 -0.044872696 0.0018409410 1.112848e-01
## CTL:81-85-CTL:76-80 0.0050754077 -0.031854965 0.0420057805 1.000000e+00
## AD:86-90-CTL:76-80 -0.0384944234 -0.075424796 -0.0015640505 3.118513e-02
## MCI:86-90-CTL:76-80 NA NA NA NA
## CTL:86-90-CTL:76-80 NA NA NA NA
## MCI:81-85-AD:81-85 -0.0066454214 -0.035251565 0.0219607224 9.999960e-01
## CTL:81-85-AD:81-85 0.0199458639 -0.020509333 0.0604010604 9.555534e-01
## AD:86-90-AD:81-85 -0.0236239671 -0.064079164 0.0168312294 8.328823e-01
## MCI:86-90-AD:81-85 NA NA NA NA
## CTL:86-90-AD:81-85 NA NA NA NA
## CTL:81-85-MCI:81-85 0.0265912852 -0.013863911 0.0670464818 6.651102e-01
## AD:86-90-MCI:81-85 -0.0169785458 -0.057433742 0.0234766507 9.909039e-01
## MCI:86-90-MCI:81-85 NA NA NA NA
## CTL:86-90-MCI:81-85 NA NA NA NA
## AD:86-90-CTL:81-85 -0.0435698310 -0.093117125 0.0059774634 1.626630e-01
## MCI:86-90-CTL:81-85 NA NA NA NA
## CTL:86-90-CTL:81-85 NA NA NA NA
## MCI:86-90-AD:86-90 NA NA NA NA
## CTL:86-90-AD:86-90 NA NA NA NA
## CTL:86-90-MCI:86-90 NA NA NA NA
Is it easier to differentiate between diagnostic groups in younger subjects?
## `geom_smooth()` using formula 'y ~ x'
| Diagnostic | term | estimate | std.error | statistic | p.value | conf.low | conf.high |
|---|---|---|---|---|---|---|---|
| AD | (Intercept) | -0.038 | 0.069 | -0.546 | 0.586 | -0.175 | 0.100 |
| AD | logExposedArea_corrected | 1.139 | 0.016 | 72.633 | 0.000 | 1.108 | 1.170 |
| MCI | (Intercept) | -0.092 | 0.043 | -2.157 | 0.032 | -0.176 | -0.008 |
| MCI | logExposedArea_corrected | 1.155 | 0.010 | 120.088 | 0.000 | 1.136 | 1.174 |
| CTL | (Intercept) | -0.108 | 0.030 | -3.658 | 0.000 | -0.166 | -0.050 |
| CTL | logExposedArea_corrected | 1.160 | 0.007 | 173.625 | 0.000 | 1.147 | 1.173 |
| ROI | Diagnostic | term | estimate | std.error | statistic | p.value | conf.low | conf.high |
|---|---|---|---|---|---|---|---|---|
| F | AD | (Intercept) | 0.09 | 0.51 | 0.17 | 0.87 | -0.96 | 1.13 |
| F | AD | logExposedArea_corrected | 1.11 | 0.11 | 9.74 | 0.00 | 0.87 | 1.34 |
| F | MCI | (Intercept) | 0.41 | 0.22 | 1.82 | 0.07 | -0.04 | 0.85 |
| F | MCI | logExposedArea_corrected | 1.04 | 0.05 | 20.81 | 0.00 | 0.94 | 1.14 |
| F | CTL | (Intercept) | -0.07 | 0.19 | -0.36 | 0.72 | -0.45 | 0.31 |
| F | CTL | logExposedArea_corrected | 1.14 | 0.04 | 26.40 | 0.00 | 1.06 | 1.23 |
| O | AD | (Intercept) | 0.39 | 0.41 | 0.95 | 0.35 | -0.46 | 1.23 |
| O | AD | logExposedArea_corrected | 1.04 | 0.10 | 10.60 | 0.00 | 0.84 | 1.24 |
| O | MCI | (Intercept) | 0.39 | 0.18 | 2.19 | 0.03 | 0.03 | 0.75 |
| O | MCI | logExposedArea_corrected | 1.04 | 0.04 | 24.18 | 0.00 | 0.95 | 1.13 |
| O | CTL | (Intercept) | -0.03 | 0.15 | -0.17 | 0.87 | -0.32 | 0.27 |
| O | CTL | logExposedArea_corrected | 1.14 | 0.04 | 31.72 | 0.00 | 1.07 | 1.21 |
| P | AD | (Intercept) | 0.51 | 0.28 | 1.85 | 0.08 | -0.06 | 1.08 |
| P | AD | logExposedArea_corrected | 1.02 | 0.06 | 17.12 | 0.00 | 0.90 | 1.15 |
| P | MCI | (Intercept) | 0.37 | 0.20 | 1.83 | 0.07 | -0.03 | 0.78 |
| P | MCI | logExposedArea_corrected | 1.06 | 0.04 | 23.83 | 0.00 | 0.97 | 1.15 |
| P | CTL | (Intercept) | 0.28 | 0.14 | 1.99 | 0.05 | 0.00 | 0.56 |
| P | CTL | logExposedArea_corrected | 1.08 | 0.03 | 35.14 | 0.00 | 1.02 | 1.14 |
| T | AD | (Intercept) | 0.63 | 0.39 | 1.65 | 0.11 | -0.16 | 1.43 |
| T | AD | logExposedArea_corrected | 0.99 | 0.09 | 11.41 | 0.00 | 0.81 | 1.17 |
| T | MCI | (Intercept) | 0.24 | 0.19 | 1.23 | 0.22 | -0.15 | 0.63 |
| T | MCI | logExposedArea_corrected | 1.08 | 0.04 | 24.85 | 0.00 | 0.99 | 1.17 |
| T | CTL | (Intercept) | 0.12 | 0.16 | 0.76 | 0.45 | -0.20 | 0.45 |
| T | CTL | logExposedArea_corrected | 1.11 | 0.04 | 30.31 | 0.00 | 1.04 | 1.18 |
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
| diag | x |
|---|---|
| AD | 1.171455 |
| MCI | 1.187479 |
| CTL | 1.185704 |
| ROI | Diagnostic | term | estimate | std.error | statistic | p.value | conf.low | conf.high |
|---|---|---|---|---|---|---|---|---|
| F | AD | (Intercept) | 0.18 | 0.49 | 0.36 | 0.72 | -0.84 | 1.20 |
| F | AD | logExposedArea_age_decay | 1.09 | 0.11 | 10.00 | 0.00 | 0.87 | 1.32 |
| F | MCI | (Intercept) | 0.36 | 0.22 | 1.65 | 0.10 | -0.08 | 0.80 |
| F | MCI | logExposedArea_age_decay | 1.05 | 0.05 | 21.58 | 0.00 | 0.96 | 1.15 |
| F | CTL | (Intercept) | 0.14 | 0.19 | 0.76 | 0.45 | -0.23 | 0.52 |
| F | CTL | logExposedArea_age_decay | 1.10 | 0.04 | 26.31 | 0.00 | 1.02 | 1.18 |
| O | AD | (Intercept) | 0.43 | 0.40 | 1.06 | 0.30 | -0.40 | 1.26 |
| O | AD | logExposedArea_age_decay | 1.04 | 0.10 | 10.89 | 0.00 | 0.84 | 1.23 |
| O | MCI | (Intercept) | 0.35 | 0.18 | 1.99 | 0.05 | 0.00 | 0.71 |
| O | MCI | logExposedArea_age_decay | 1.06 | 0.04 | 25.24 | 0.00 | 0.97 | 1.14 |
| O | CTL | (Intercept) | 0.11 | 0.15 | 0.74 | 0.46 | -0.19 | 0.42 |
| O | CTL | logExposedArea_age_decay | 1.11 | 0.04 | 30.52 | 0.00 | 1.04 | 1.19 |
| P | AD | (Intercept) | 0.49 | 0.26 | 1.91 | 0.07 | -0.04 | 1.02 |
| P | AD | logExposedArea_age_decay | 1.04 | 0.06 | 18.75 | 0.00 | 0.92 | 1.15 |
| P | MCI | (Intercept) | 0.40 | 0.20 | 1.98 | 0.05 | 0.00 | 0.80 |
| P | MCI | logExposedArea_age_decay | 1.06 | 0.04 | 24.44 | 0.00 | 0.97 | 1.15 |
| P | CTL | (Intercept) | 0.41 | 0.13 | 3.10 | 0.00 | 0.15 | 0.68 |
| P | CTL | logExposedArea_age_decay | 1.06 | 0.03 | 36.73 | 0.00 | 1.00 | 1.11 |
| T | AD | (Intercept) | 0.53 | 0.37 | 1.43 | 0.17 | -0.23 | 1.29 |
| T | AD | logExposedArea_age_decay | 1.02 | 0.08 | 12.54 | 0.00 | 0.86 | 1.19 |
| T | MCI | (Intercept) | 0.28 | 0.18 | 1.52 | 0.13 | -0.09 | 0.65 |
| T | MCI | logExposedArea_age_decay | 1.08 | 0.04 | 26.45 | 0.00 | 1.00 | 1.16 |
| T | CTL | (Intercept) | 0.38 | 0.15 | 2.49 | 0.01 | 0.08 | 0.69 |
| T | CTL | logExposedArea_age_decay | 1.06 | 0.03 | 31.17 | 0.00 | 0.99 | 1.13 |
| diag | ROI | x |
|---|---|---|
| AD | F | 1.091508 |
| MCI | F | 1.053151 |
| CTL | F | 1.102154 |
| AD | O | 1.035802 |
| MCI | O | 1.056347 |
| CTL | O | 1.113277 |
| AD | P | 1.036516 |
| MCI | P | 1.060304 |
| CTL | P | 1.056710 |
| AD | T | 1.023934 |
| MCI | T | 1.080907 |
| CTL | T | 1.060230 |
| ROI | Diagnostic | term | estimate | std.error | statistic | p.value | conf.low | conf.high |
|---|---|---|---|---|---|---|---|---|
| F | AD | (Intercept) | 0.18 | 0.49 | 0.36 | 0.72 | -0.84 | 1.20 |
| F | AD | logExposedArea_age_decay | 1.09 | 0.11 | 10.00 | 0.00 | 0.87 | 1.32 |
| F | MCI | (Intercept) | 0.36 | 0.22 | 1.65 | 0.10 | -0.08 | 0.80 |
| F | MCI | logExposedArea_age_decay | 1.05 | 0.05 | 21.58 | 0.00 | 0.96 | 1.15 |
| F | CTL | (Intercept) | 0.14 | 0.19 | 0.76 | 0.45 | -0.23 | 0.52 |
| F | CTL | logExposedArea_age_decay | 1.10 | 0.04 | 26.31 | 0.00 | 1.02 | 1.18 |
| O | AD | (Intercept) | 0.43 | 0.40 | 1.06 | 0.30 | -0.40 | 1.26 |
| O | AD | logExposedArea_age_decay | 1.04 | 0.10 | 10.89 | 0.00 | 0.84 | 1.23 |
| O | MCI | (Intercept) | 0.35 | 0.18 | 1.99 | 0.05 | 0.00 | 0.71 |
| O | MCI | logExposedArea_age_decay | 1.06 | 0.04 | 25.24 | 0.00 | 0.97 | 1.14 |
| O | CTL | (Intercept) | 0.11 | 0.15 | 0.74 | 0.46 | -0.19 | 0.42 |
| O | CTL | logExposedArea_age_decay | 1.11 | 0.04 | 30.52 | 0.00 | 1.04 | 1.19 |
| P | AD | (Intercept) | 0.49 | 0.26 | 1.91 | 0.07 | -0.04 | 1.02 |
| P | AD | logExposedArea_age_decay | 1.04 | 0.06 | 18.75 | 0.00 | 0.92 | 1.15 |
| P | MCI | (Intercept) | 0.40 | 0.20 | 1.98 | 0.05 | 0.00 | 0.80 |
| P | MCI | logExposedArea_age_decay | 1.06 | 0.04 | 24.44 | 0.00 | 0.97 | 1.15 |
| P | CTL | (Intercept) | 0.41 | 0.13 | 3.10 | 0.00 | 0.15 | 0.68 |
| P | CTL | logExposedArea_age_decay | 1.06 | 0.03 | 36.73 | 0.00 | 1.00 | 1.11 |
| T | AD | (Intercept) | 0.53 | 0.37 | 1.43 | 0.17 | -0.23 | 1.29 |
| T | AD | logExposedArea_age_decay | 1.02 | 0.08 | 12.54 | 0.00 | 0.86 | 1.19 |
| T | MCI | (Intercept) | 0.28 | 0.18 | 1.52 | 0.13 | -0.09 | 0.65 |
| T | MCI | logExposedArea_age_decay | 1.08 | 0.04 | 26.45 | 0.00 | 1.00 | 1.16 |
| T | CTL | (Intercept) | 0.38 | 0.15 | 2.49 | 0.01 | 0.08 | 0.69 |
| T | CTL | logExposedArea_age_decay | 1.06 | 0.03 | 31.17 | 0.00 | 0.99 | 1.13 |
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
| Diagnostic | N_SUBJ |
|---|---|
| AD | 13 |
| MCI | 33 |
| CTL | 77 |
| Diagnostic | N_SUBJ |
|---|---|
| AD | 13 |
| MCI | 33 |
| CTL | 77 |
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.01121 0.005607 28.27 9.13e-12 ***
## Residuals 243 0.04819 0.000198
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = K ~ Diagnostic, data = dados_hemi_v1)
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD 0.015650636 0.007961177 0.02334010 0.0000083
## CTL-AD 0.021969964 0.014928714 0.02901121 0.0000000
## CTL-MCI 0.006319328 0.001433485 0.01120517 0.0071486
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.00904 0.004519 14.71 9.49e-07 ***
## Residuals 239 0.07345 0.000307
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = K ~ Diagnostic, data = filter(dados_lobos_v1, ROI == "F"))
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD 0.014609191 0.0050152217 0.02420316 0.0011588
## CTL-AD 0.019888810 0.0111100645 0.02866756 0.0000006
## CTL-MCI 0.005279619 -0.0008538009 0.01141304 0.1072424
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.01088 0.005439 10.07 6.3e-05 ***
## Residuals 239 0.12902 0.000540
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = K ~ Diagnostic, data = filter(dados_lobos_v1, ROI == "O"))
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD 0.014822783 0.002107549 0.02753802 0.0176045
## CTL-AD 0.021529149 0.009894360 0.03316394 0.0000563
## CTL-MCI 0.006706366 -0.001422478 0.01483521 0.1282589
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.01254 0.006269 16.65 1.7e-07 ***
## Residuals 239 0.08998 0.000376
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = K ~ Diagnostic, data = filter(dados_lobos_v1, ROI == "P"))
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD 0.021557695 0.010938941 0.032176449 0.0000088
## CTL-AD 0.023693603 0.013977151 0.033410055 0.0000001
## CTL-MCI 0.002135908 -0.004652656 0.008924473 0.7387226
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.01975 0.009876 28.49 7.97e-12 ***
## Residuals 239 0.08284 0.000347
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = K ~ Diagnostic, data = filter(dados_lobos_v1, ROI == "T"))
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD 0.01547690 0.005288318 0.02566549 0.0011957
## CTL-AD 0.02747185 0.018149018 0.03679469 0.0000000
## CTL-MCI 0.01199495 0.005481393 0.01850851 0.0000615
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.00622 0.0031120 17.11 1.12e-07 ***
## Residuals 243 0.04419 0.0001819
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = K_age_decay ~ Diagnostic, data = dados_hemi_v1)
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD 0.013367888 0.006004416 0.020731361 0.0000793
## CTL-AD 0.016674777 0.009932034 0.023417520 0.0000001
## CTL-MCI 0.003306889 -0.001371824 0.007985602 0.2201387
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.00382 0.0019106 7.034 0.00108 **
## Residuals 239 0.06492 0.0002716
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = K_age_decay ~ Diagnostic, data = filter(dados_lobos_v1, ROI == "F"))
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD 0.011065282 0.002045444 0.020085120 0.0115641
## CTL-AD 0.013124061 0.004870661 0.021377461 0.0006473
## CTL-MCI 0.002058779 -0.003707599 0.007825157 0.6772752
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.00393 0.0019647 4.518 0.0119 *
## Residuals 239 0.10393 0.0004348
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = K_age_decay ~ Diagnostic, data = filter(dados_lobos_v1, ROI == "O"))
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD 0.011920560 0.0005084727 0.023332648 0.0383080
## CTL-AD 0.013281933 0.0028395590 0.023724307 0.0083710
## CTL-MCI 0.001361373 -0.0059343696 0.008657115 0.8988113
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.01086 0.005429 13.85 2.04e-06 ***
## Residuals 239 0.09372 0.000392
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = K_age_decay ~ Diagnostic, data = filter(dados_lobos_v1, ROI == "P"))
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD 0.023170002 0.012332836 0.034007169 0.0000027
## CTL-AD 0.020506039 0.010589734 0.030422345 0.0000059
## CTL-MCI -0.002663963 -0.009592158 0.004264233 0.6365167
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.00857 0.004283 12.57 6.46e-06 ***
## Residuals 239 0.08146 0.000341
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = K_age_decay ~ Diagnostic, data = filter(dados_lobos_v1, ROI == "T"))
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD 0.011008198 0.0009050420 0.02111135 0.0289563
## CTL-AD 0.018409571 0.0091649059 0.02765424 0.0000133
## CTL-MCI 0.007401373 0.0009424307 0.01386032 0.0200989
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.01343 0.006717 25.3 1.05e-10 ***
## Residuals 243 0.06452 0.000265
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.01143 0.005714 16.86 1.41e-07 ***
## Residuals 239 0.08098 0.000339
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.0043 0.002149 5.142 0.00651 **
## Residuals 239 0.0999 0.000418
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.00769 0.003843 11.8 1.3e-05 ***
## Residuals 239 0.07786 0.000326
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.03180 0.015898 41.54 3.28e-16 ***
## Residuals 239 0.09146 0.000383
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.00406 0.002031 9.274 0.000131 ***
## Residuals 243 0.05321 0.000219
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.00333 0.0016636 5.786 0.00352 **
## Residuals 239 0.06871 0.0002875
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.00104 0.0005196 1.287 0.278
## Residuals 239 0.09650 0.0004038
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.00106 0.0005308 2.023 0.135
## Residuals 239 0.06272 0.0002624
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.01546 0.007730 23.09 6.81e-10 ***
## Residuals 239 0.08002 0.000335
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
## Joining, by = c("Contrast", "diff", "lwr", "upr", "p adj", "ROI", "variable", "agecorrection")
| Contrast | diff | lwr | upr | p adj | ROI | variable | agecorrection |
|---|---|---|---|---|---|---|---|
| CTL-AD | 0.0357862 | 0.0259901 | 0.0455822 | 0.0000000 | Temporal Lobe | T | no |
| CTL-AD | 0.0274719 | 0.0181490 | 0.0367947 | 0.0000000 | Temporal Lobe | K | no |
| CTL-AD | 0.0256409 | 0.0164781 | 0.0348037 | 0.0000000 | Temporal Lobe | T | yes |
| CTL-AD | 0.0236936 | 0.0139772 | 0.0334101 | 0.0000001 | Parietal Lobe | K | no |
| CTL-AD | 0.0232811 | 0.0151342 | 0.0314280 | 0.0000000 | Hemisphere | T | no |
| MCI-AD | 0.0231700 | 0.0123328 | 0.0340072 | 0.0000027 | Parietal Lobe | K | yes |
| MCI-AD | 0.0220857 | 0.0113799 | 0.0327915 | 0.0000062 | Temporal Lobe | T | no |
| CTL-AD | 0.0219700 | 0.0149287 | 0.0290112 | 0.0000000 | Hemisphere | K | no |
| CTL-AD | 0.0217219 | 0.0125042 | 0.0309396 | 0.0000002 | Frontal Lobe | T | no |
| MCI-AD | 0.0215577 | 0.0109389 | 0.0321764 | 0.0000088 | Parietal Lobe | K | no |
| CTL-AD | 0.0215291 | 0.0098944 | 0.0331639 | 0.0000563 | Occipital Lobe | K | no |
| CTL-AD | 0.0205060 | 0.0105897 | 0.0304223 | 0.0000059 | Parietal Lobe | K | yes |
| CTL-AD | 0.0198888 | 0.0111101 | 0.0286676 | 0.0000006 | Frontal Lobe | K | no |
| CTL-AD | 0.0184096 | 0.0091649 | 0.0276542 | 0.0000133 | Temporal Lobe | K | yes |
| CTL-AD | 0.0180360 | 0.0089976 | 0.0270743 | 0.0000127 | Parietal Lobe | T | no |
| MCI-AD | 0.0175805 | 0.0075668 | 0.0275942 | 0.0001419 | Temporal Lobe | T | yes |
| CTL-AD | 0.0166748 | 0.0099320 | 0.0234175 | 0.0000001 | Hemisphere | K | yes |
| MCI-AD | 0.0156506 | 0.0079612 | 0.0233401 | 0.0000083 | Hemisphere | K | no |
| MCI-AD | 0.0154769 | 0.0052883 | 0.0256655 | 0.0011957 | Temporal Lobe | K | no |
| MCI-AD | 0.0148228 | 0.0021075 | 0.0275380 | 0.0176045 | Occipital Lobe | K | no |
| MCI-AD | 0.0146092 | 0.0050152 | 0.0242032 | 0.0011588 | Frontal Lobe | K | no |
| MCI-AD | 0.0145794 | 0.0056825 | 0.0234763 | 0.0004190 | Hemisphere | T | no |
| MCI-AD | 0.0140342 | 0.0039605 | 0.0241079 | 0.0033437 | Frontal Lobe | T | no |
| CTL-MCI | 0.0137005 | 0.0068563 | 0.0205447 | 0.0000119 | Temporal Lobe | T | no |
| CTL-AD | 0.0134066 | 0.0060078 | 0.0208053 | 0.0000820 | Hemisphere | T | yes |
| CTL-AD | 0.0133951 | 0.0031571 | 0.0236330 | 0.0064013 | Occipital Lobe | T | no |
| MCI-AD | 0.0133679 | 0.0060044 | 0.0207314 | 0.0000793 | Hemisphere | K | yes |
| CTL-AD | 0.0132819 | 0.0028396 | 0.0237243 | 0.0083710 | Occipital Lobe | K | yes |
| CTL-AD | 0.0131241 | 0.0048707 | 0.0213775 | 0.0006473 | Frontal Lobe | K | yes |
| MCI-AD | 0.0122392 | 0.0023615 | 0.0221169 | 0.0106006 | Parietal Lobe | T | no |
| CTL-AD | 0.0122153 | 0.0037244 | 0.0207063 | 0.0023265 | Frontal Lobe | T | yes |
| CTL-MCI | 0.0119950 | 0.0054814 | 0.0185085 | 0.0000615 | Temporal Lobe | K | no |
| MCI-AD | 0.0119206 | 0.0005085 | 0.0233326 | 0.0383080 | Occipital Lobe | K | yes |
| MCI-AD | 0.0110653 | 0.0020454 | 0.0200851 | 0.0115641 | Frontal Lobe | K | yes |
| MCI-AD | 0.0110082 | 0.0009050 | 0.0211114 | 0.0289563 | Temporal Lobe | K | yes |
| MCI-AD | 0.0103224 | 0.0022425 | 0.0184024 | 0.0080307 | Hemisphere | T | yes |
| MCI-AD | 0.0098127 | 0.0005332 | 0.0190921 | 0.0353936 | Frontal Lobe | T | yes |
| MCI-AD | 0.0088378 | -0.0023509 | 0.0200265 | 0.1518261 | Occipital Lobe | T | no |
| CTL-MCI | 0.0087018 | 0.0030487 | 0.0143548 | 0.0010045 | Hemisphere | T | no |
| CTL-MCI | 0.0080604 | 0.0016586 | 0.0144621 | 0.0091860 | Temporal Lobe | T | yes |
| CTL-MCI | 0.0076877 | 0.0012476 | 0.0141278 | 0.0145545 | Frontal Lobe | T | no |
| CTL-MCI | 0.0074014 | 0.0009424 | 0.0138603 | 0.0200989 | Temporal Lobe | K | yes |
| MCI-AD | 0.0071304 | -0.0017348 | 0.0159955 | 0.1417788 | Parietal Lobe | T | yes |
| CTL-AD | 0.0068439 | -0.0032186 | 0.0169063 | 0.2458982 | Occipital Lobe | T | yes |
| CTL-MCI | 0.0067064 | -0.0014225 | 0.0148352 | 0.1282589 | Occipital Lobe | K | no |
| CTL-AD | 0.0065313 | -0.0015805 | 0.0146432 | 0.1412076 | Parietal Lobe | T | yes |
| CTL-MCI | 0.0063193 | 0.0014335 | 0.0112052 | 0.0071486 | Hemisphere | K | no |
| MCI-AD | 0.0059286 | -0.0050683 | 0.0169255 | 0.4127097 | Occipital Lobe | T | yes |
| CTL-MCI | 0.0057967 | -0.0005181 | 0.0121116 | 0.0794033 | Parietal Lobe | T | no |
| CTL-MCI | 0.0052796 | -0.0008538 | 0.0114130 | 0.1072424 | Frontal Lobe | K | no |
| CTL-MCI | 0.0045573 | -0.0025956 | 0.0117102 | 0.2915216 | Occipital Lobe | T | no |
| CTL-MCI | 0.0033069 | -0.0013718 | 0.0079856 | 0.2201387 | Hemisphere | K | yes |
| CTL-MCI | 0.0030841 | -0.0020498 | 0.0082180 | 0.3339127 | Hemisphere | T | yes |
| CTL-MCI | 0.0024027 | -0.0035297 | 0.0083350 | 0.6059254 | Frontal Lobe | T | yes |
| CTL-MCI | 0.0021359 | -0.0046527 | 0.0089245 | 0.7387226 | Parietal Lobe | K | no |
| CTL-MCI | 0.0020588 | -0.0037076 | 0.0078252 | 0.6772752 | Frontal Lobe | K | yes |
| CTL-MCI | 0.0013614 | -0.0059344 | 0.0086571 | 0.8988113 | Occipital Lobe | K | yes |
| CTL-MCI | 0.0009152 | -0.0061151 | 0.0079456 | 0.9493762 | Occipital Lobe | T | yes |
| CTL-MCI | -0.0005990 | -0.0062665 | 0.0050684 | 0.9663291 | Parietal Lobe | T | yes |
| CTL-MCI | -0.0026640 | -0.0095922 | 0.0042642 | 0.6365167 | Parietal Lobe | K | yes |
##
## Call:
## lm(formula = K ~ Age * ROI * Diagnostic, data = dados)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.062579 -0.012117 0.000376 0.012462 0.061381
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -4.175e-01 4.632e-02 -9.013 <2e-16 ***
## Age -9.139e-04 5.991e-04 -1.525 0.1274
## ROIhemisphere -7.977e-02 6.551e-02 -1.218 0.2236
## ROIO 7.811e-02 6.551e-02 1.192 0.2333
## ROIP 1.091e-01 6.551e-02 1.665 0.0962 .
## ROIT 7.845e-02 6.551e-02 1.198 0.2313
## DiagnosticMCI -2.951e-02 5.784e-02 -0.510 0.6100
## DiagnosticCTL -1.502e-02 4.784e-02 -0.314 0.7535
## Age:ROIhemisphere 2.715e-04 8.472e-04 0.320 0.7487
## Age:ROIO 4.846e-04 8.472e-04 0.572 0.5674
## Age:ROIP 5.873e-05 8.472e-04 0.069 0.9448
## Age:ROIT 2.245e-04 8.472e-04 0.265 0.7911
## Age:DiagnosticMCI 5.356e-04 7.671e-04 0.698 0.4852
## Age:DiagnosticCTL 3.757e-04 6.254e-04 0.601 0.5481
## ROIhemisphere:DiagnosticMCI 4.889e-02 8.225e-02 0.594 0.5524
## ROIO:DiagnosticMCI 3.163e-02 8.179e-02 0.387 0.6990
## ROIP:DiagnosticMCI 5.957e-02 8.179e-02 0.728 0.4666
## ROIT:DiagnosticMCI 1.041e-01 8.179e-02 1.273 0.2033
## ROIhemisphere:DiagnosticCTL 2.221e-02 6.761e-02 0.328 0.7426
## ROIO:DiagnosticCTL 3.178e-02 6.765e-02 0.470 0.6387
## ROIP:DiagnosticCTL 1.799e-02 6.765e-02 0.266 0.7904
## ROIT:DiagnosticCTL 4.475e-02 6.765e-02 0.662 0.5084
## Age:ROIhemisphere:DiagnosticMCI -6.302e-04 1.091e-03 -0.578 0.5637
## Age:ROIO:DiagnosticMCI -4.052e-04 1.085e-03 -0.374 0.7088
## Age:ROIP:DiagnosticMCI -7.229e-04 1.085e-03 -0.666 0.5053
## Age:ROIT:DiagnosticMCI -1.413e-03 1.085e-03 -1.302 0.1931
## Age:ROIhemisphere:DiagnosticCTL -2.606e-04 8.839e-04 -0.295 0.7681
## Age:ROIO:DiagnosticCTL -3.753e-04 8.845e-04 -0.424 0.6714
## Age:ROIP:DiagnosticCTL -2.050e-04 8.845e-04 -0.232 0.8168
## Age:ROIT:DiagnosticCTL -5.254e-04 8.845e-04 -0.594 0.5526
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.01838 on 1192 degrees of freedom
## Multiple R-squared: 0.94, Adjusted R-squared: 0.9386
## F-statistic: 644.1 on 29 and 1192 DF, p-value: < 2.2e-16
| Df | Sum Sq | Mean Sq | F value | Pr(>F) | |
|---|---|---|---|---|---|
| Age | 1 | 0.0590835 | 0.0590835 | 174.8142738 | 0.0000000 |
| ROI | 4 | 6.2245775 | 1.5561444 | 4604.2704223 | 0.0000000 |
| Diagnostic | 2 | 0.0231449 | 0.0115724 | 34.2401334 | 0.0000000 |
| Age:ROI | 4 | 0.0029111 | 0.0007278 | 2.1533287 | 0.0722192 |
| Age:Diagnostic | 2 | 0.0002838 | 0.0001419 | 0.4198882 | 0.6572175 |
| ROI:Diagnostic | 8 | 0.0019338 | 0.0002417 | 0.7152168 | 0.6782953 |
| Age:ROI:Diagnostic | 8 | 0.0008803 | 0.0001100 | 0.3255776 | 0.9564882 |
| Residuals | 1192 | 0.4028704 | 0.0003380 | NA | NA |
## `summarise()` has grouped output by 'Diagnostic', 'ROI'. You can override using the `.groups` argument.
## Call:
## aov(formula = K ~ Diagnostic * ROI * Age_interval10, data = dados)
##
## Terms:
## Diagnostic ROI Age_interval10 Diagnostic:ROI
## Sum of Squares 0.058398 6.222467 0.036026 0.002912
## Deg. of Freedom 2 4 4 8
## Diagnostic:Age_interval10 ROI:Age_interval10
## Sum of Squares 0.008345 0.004742
## Deg. of Freedom 4 16
## Diagnostic:ROI:Age_interval10 Residuals
## Sum of Squares 0.005051 0.377744
## Deg. of Freedom 16 1167
##
## Residual standard error: 0.01799133
## 20 out of 75 effects not estimable
## Estimated effects may be unbalanced
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.058 0.0292 90.207 < 2e-16 ***
## ROI 4 6.222 1.5556 4805.912 < 2e-16 ***
## Age_interval10 4 0.036 0.0090 27.824 < 2e-16 ***
## Diagnostic:ROI 8 0.003 0.0004 1.125 0.344
## Diagnostic:Age_interval10 4 0.008 0.0021 6.446 3.95e-05 ***
## ROI:Age_interval10 16 0.005 0.0003 0.916 0.551
## Diagnostic:ROI:Age_interval10 16 0.005 0.0003 0.975 0.482
## Residuals 1167 0.378 0.0003
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Call:
## aov(formula = K_age_decay ~ Diagnostic * ROI * Age_interval10,
## data = dados)
##
## Terms:
## Diagnostic ROI Age_interval10 Diagnostic:ROI
## Sum of Squares 0.0296929 0.5176382 0.0144076 0.0032762
## Deg. of Freedom 2 4 4 8
## Diagnostic:Age_interval10 ROI:Age_interval10
## Sum of Squares 0.0094489 0.0026622
## Deg. of Freedom 4 16
## Diagnostic:ROI:Age_interval10 Residuals
## Sum of Squares 0.0010631 0.3687459
## Deg. of Freedom 16 1167
##
## Residual standard error: 0.01777576
## 20 out of 75 effects not estimable
## Estimated effects may be unbalanced
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.0297 0.01485 46.986 < 2e-16 ***
## ROI 4 0.5176 0.12941 409.553 < 2e-16 ***
## Age_interval10 4 0.0144 0.00360 11.399 4.45e-09 ***
## Diagnostic:ROI 8 0.0033 0.00041 1.296 0.241
## Diagnostic:Age_interval10 4 0.0094 0.00236 7.476 6.04e-06 ***
## ROI:Age_interval10 16 0.0027 0.00017 0.527 0.934
## Diagnostic:ROI:Age_interval10 16 0.0011 0.00007 0.210 1.000
## Residuals 1167 0.3687 0.00032
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.01121 0.005607 32.072 4.84e-13 ***
## Age_interval10 4 0.00595 0.001488 8.508 1.99e-06 ***
## Diagnostic:Age_interval10 4 0.00116 0.000289 1.652 0.162
## Residuals 235 0.04109 0.000175
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
| diff | lwr | upr | p adj | |
|---|---|---|---|---|
| AD:60-CTL:40 | -0.0412054 | -0.0770390 | -0.0053717 | 0.0089726 |
| AD:70-CTL:40 | -0.0268346 | -0.0464615 | -0.0072077 | 0.0004533 |
| MCI:70-CTL:40 | -0.0180712 | -0.0357028 | -0.0004395 | 0.0384122 |
| AD:80-CTL:40 | -0.0498070 | -0.0724702 | -0.0271438 | 0.0000000 |
| MCI:80-CTL:40 | -0.0342529 | -0.0587320 | -0.0097739 | 0.0002797 |
| AD:70-CTL:50 | -0.0181372 | -0.0327663 | -0.0035082 | 0.0027845 |
| AD:80-CTL:50 | -0.0411096 | -0.0596140 | -0.0226052 | 0.0000000 |
| MCI:80-CTL:50 | -0.0255555 | -0.0462441 | -0.0048670 | 0.0029616 |
| AD:80-MCI:60 | -0.0322890 | -0.0510025 | -0.0135755 | 0.0000012 |
| AD:70-CTL:60 | -0.0140077 | -0.0266021 | -0.0014134 | 0.0142227 |
| AD:80-CTL:60 | -0.0369801 | -0.0539219 | -0.0200384 | 0.0000000 |
| MCI:80-CTL:60 | -0.0214261 | -0.0407296 | -0.0021225 | 0.0146138 |
| AD:80-AD:70 | -0.0229724 | -0.0425993 | -0.0033455 | 0.0069268 |
| AD:80-MCI:70 | -0.0317358 | -0.0493675 | -0.0141042 | 0.0000003 |
| AD:80-CTL:70 | -0.0326927 | -0.0499066 | -0.0154787 | 0.0000000 |
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.00622 0.0031120 17.947 5.58e-08 ***
## Age_interval10 4 0.00219 0.0005474 3.157 0.0149 *
## Diagnostic:Age_interval10 4 0.00125 0.0003136 1.809 0.1279
## Residuals 235 0.04075 0.0001734
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
| diff | lwr | upr | p adj | |
|---|---|---|---|---|
| AD:80-CTL:40 | -0.0328489 | -0.0554186 | -0.0102791 | 0.0001171 |
| AD:80-CTL:50 | -0.0279230 | -0.0463511 | -0.0094948 | 0.0000448 |
| AD:80-MCI:60 | -0.0252366 | -0.0438729 | -0.0066002 | 0.0005505 |
| AD:80-CTL:60 | -0.0290177 | -0.0458896 | -0.0121458 | 0.0000013 |
| AD:80-AD:70 | -0.0199235 | -0.0394695 | -0.0003776 | 0.0407941 |
| AD:80-MCI:70 | -0.0274766 | -0.0450356 | -0.0099177 | 0.0000198 |
| AD:80-CTL:70 | -0.0285306 | -0.0456736 | -0.0113877 | 0.0000036 |
## `summarise()` has grouped output by 'ROI'. You can override using the `.groups` argument.
## `summarise()` has grouped output by 'Diagnostic'. You can override using the `.groups` argument.
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.01121 0.005607 28.933 5.53e-12 ***
## Age.group 1 0.00147 0.001472 7.595 0.0063 **
## Diagnostic:Age.group 2 0.00021 0.000103 0.532 0.5883
## Residuals 240 0.04651 0.000194
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = K ~ Diagnostic * Age.group, data = dados_hemi_v1_DACTL)
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD 0.015650636 0.008048585 0.02325269 0.0000065
## CTL-AD 0.021969964 0.015008754 0.02893117 0.0000000
## CTL-MCI 0.006319328 0.001489023 0.01114963 0.0064052
##
## $Age.group
## diff lwr upr p adj
## 76-86-66-75 -0.00532469 -0.009494614 -0.001154767 0.0125435
##
## $`Diagnostic:Age.group`
## diff lwr upr p adj
## MCI:66-75-AD:66-75 0.0132027421 -0.002070637 0.028476122 0.1331506
## CTL:66-75-AD:66-75 0.0175295279 0.002973143 0.032085912 0.0083090
## AD:76-86-AD:66-75 -0.0071676450 -0.024162293 0.009827003 0.8308950
## MCI:76-86-AD:66-75 0.0039835620 -0.013011086 0.020978210 0.9847044
## CTL:76-86-AD:66-75 0.0135118232 -0.003219326 0.030242972 0.1900489
## CTL:66-75-MCI:66-75 0.0043267858 -0.002400958 0.011054529 0.4372864
## AD:76-86-MCI:66-75 -0.0203703871 -0.031424448 -0.009316327 0.0000040
## MCI:76-86-MCI:66-75 -0.0092191801 -0.020273241 0.001834880 0.1617523
## CTL:76-86-MCI:66-75 0.0003090811 -0.010335427 0.010953589 0.9999994
## AD:76-86-CTL:66-75 -0.0246971729 -0.034737316 -0.014657030 0.0000000
## MCI:76-86-CTL:66-75 -0.0135459659 -0.023586108 -0.003505823 0.0018865
## CTL:76-86-CTL:66-75 -0.0040177047 -0.013605079 0.005569670 0.8346759
## MCI:76-86-AD:76-86 0.0111512070 -0.002180492 0.024482906 0.1593089
## CTL:76-86-AD:76-86 0.0206794682 0.007685336 0.033673601 0.0001122
## CTL:76-86-MCI:76-86 0.0095282612 -0.003465871 0.022522394 0.2873585
## Df Sum Sq Mean Sq F value Pr(>F)
## Diagnostic 2 0.1539 0.07697 6.470 0.00183 **
## Age.group 1 0.0226 0.02262 1.901 0.16925
## Diagnostic:Age.group 2 0.1029 0.05145 4.324 0.01429 *
## Residuals 240 2.8555 0.01190
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = S ~ Diagnostic * Age.group, data = dados_hemi_v1_DACTL)
##
## $Diagnostic
## diff lwr upr p adj
## MCI-AD -0.04832085 -0.1078839 0.011242168 0.1371341
## CTL-AD -0.07844671 -0.1329887 -0.023904762 0.0023323
## CTL-MCI -0.03012586 -0.0679719 0.007720179 0.1475039
##
## $Age.group
## diff lwr upr p adj
## 76-86-66-75 0.02087186 -0.01180001 0.05354373 0.2094573
##
## $`Diagnostic:Age.group`
## diff lwr upr p adj
## MCI:66-75-AD:66-75 -0.0953105293 -0.214979358 0.024358299 0.2029580
## CTL:66-75-AD:66-75 -0.0981648521 -0.212215936 0.015886232 0.1364256
## AD:76-86-AD:66-75 -0.0303712354 -0.163526415 0.102783945 0.9864784
## MCI:76-86-AD:66-75 -0.0001112472 -0.133266427 0.133043933 1.0000000
## CTL:76-86-AD:66-75 -0.1082371945 -0.239327828 0.022853439 0.1703321
## CTL:66-75-MCI:66-75 -0.0028543228 -0.055567029 0.049858384 0.9999872
## AD:76-86-MCI:66-75 0.0649392939 -0.021670645 0.151549233 0.2634887
## MCI:76-86-MCI:66-75 0.0951992821 0.008589343 0.181809221 0.0218904
## CTL:76-86-MCI:66-75 -0.0129266652 -0.096327707 0.070474377 0.9977733
## AD:76-86-CTL:66-75 0.0677936167 -0.010872150 0.146459383 0.1354651
## MCI:76-86-CTL:66-75 0.0980536049 0.019387838 0.176719372 0.0054879
## CTL:76-86-CTL:66-75 -0.0100723424 -0.085190617 0.065045932 0.9988905
## MCI:76-86-AD:76-86 0.0302599882 -0.074195529 0.134715505 0.9612657
## CTL:76-86-AD:76-86 -0.0778659592 -0.179676603 0.023944685 0.2428766
## CTL:76-86-MCI:76-86 -0.1081259473 -0.209936592 -0.006315303 0.0301371
## [1] "K decreasing rate/year (CTL-temporallobe)= 0.0024"
## [1] "K decreasing rate/year (AD-temporallobe) = 0.0033"
## [1] "K decreasing rate/year (MCI-temporallobe) = 0.0039"
## [1] "K decreasing rate/year (AD/CTL-temporallobe) = 1.4"
## [1] "K decreasing rate/year (MCI/CTL-temporallobe) = 1.6"
## [1] "S decreasing rate/year (CTL-temporallobe)= 0.015"
## [1] "S decreasing rate/year (AD-temporallobe) = 0.024"
## [1] "S decreasing rate/year (MCI-temporallobe) = 0.027"
## [1] "S decreasing rate/year (AD/CTL-temporallobe) = 1.6"
## [1] "S decreasing rate/year (MCI/CTL-temporallobe) = 1.8"
## [1] "I decreasing rate/year (CTL-temporallobe)= 0.012"
## [1] "I decreasing rate/year (AD-temporallobe) = 0.013"
## [1] "I decreasing rate/year (MCI-temporallobe) = 0.022"
## [1] "I decreasing rate/year (AD/CTL-temporallobe) = 1.1"
## [1] "I decreasing rate/year (MCI/CTL-temporallobe) = 1.9"
| Diagnostic | N | Mean_COGNITIVE_INDEX | STD_COGNITIVE_INDEX | Mean_A7_A5 | STD_A7_A5 | Mean_TMT_B_A | STD_TMT_B_A | Mean_relogio | STD_relogio | Mean_DIGIT_SPAN_BACK | STD_DIGIT_SPAN_BACK | Mean_Lipoxina | STD_Lipoxina | Mean_AB1_40 | STD_AB1_40 | Mean_AB1_42 | STD_AB1_42 | Mean_TAU | STD_TAU |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| AD | 13 | -3.35 | 1.48 | 0.24 | 0.31 | 226.69 | 131.29 | 8.92 | 1.64 | 3.77 | 1.39 | 79.10 | 73.64 | 5664.22 | 1665.88 | 279.71 | 60.00 | 632.00 | 278.83 |
| MCI | 33 | -1.48 | 1.28 | 0.54 | 0.30 | 129.73 | 105.03 | 8.61 | 1.84 | 4.70 | 1.60 | 120.24 | 49.46 | 4557.04 | 2559.94 | 413.35 | 306.30 | 444.21 | 196.85 |
| CTL | 77 | 0.21 | 0.65 | 0.82 | 0.18 | 58.53 | 48.00 | 9.29 | 1.21 | 5.84 | 1.74 | 127.15 | 61.52 | 4192.04 | 1915.04 | 533.92 | 242.82 | 354.87 | 194.95 |
## `summarise()` has grouped output by 'clinical_test'. You can override using the `.groups` argument.
| clinical_test | Diagnostic | N | Mean | STD |
|---|---|---|---|---|
| A7/A5 | AD | 13 | 0.24 | 0.31 |
| A7/A5 | MCI | 33 | 0.54 | 0.30 |
| A7/A5 | CTL | 77 | 0.82 | 0.18 |
| AB1-40 | AD | 13 | 5664.22 | 1603.33 |
| AB1-40 | MCI | 33 | 4557.04 | 2516.28 |
| AB1-40 | CTL | 77 | 4192.04 | 1900.51 |
| AB1-42 | AD | 13 | 279.71 | 57.75 |
| AB1-42 | MCI | 33 | 413.35 | 301.08 |
| AB1-42 | CTL | 77 | 533.92 | 240.98 |
| AB1_ratio | AD | 13 | 0.05 | 0.01 |
| AB1_ratio | MCI | 33 | 0.12 | 0.12 |
| AB1_ratio | CTL | 77 | 0.16 | 0.12 |
| COGNITIVE_INDEX | AD | 13 | -3.35 | 1.46 |
| COGNITIVE_INDEX | MCI | 33 | -1.48 | 1.27 |
| COGNITIVE_INDEX | CTL | 77 | 0.21 | 0.64 |
| DIGIT SPAN BACK | AD | 13 | 3.77 | 1.37 |
| DIGIT SPAN BACK | MCI | 33 | 4.70 | 1.59 |
| DIGIT SPAN BACK | CTL | 77 | 5.84 | 1.74 |
| Lipoxina | AD | 13 | 79.10 | 70.87 |
| Lipoxina | MCI | 33 | 120.24 | 48.62 |
| Lipoxina | CTL | 77 | 127.15 | 61.04 |
| MMSE | AD | 13 | 22.92 | 3.53 |
| MMSE | MCI | 33 | 26.09 | 1.99 |
| MMSE | CTL | 77 | 27.94 | 1.39 |
| relogio | AD | 13 | 8.92 | 1.61 |
| relogio | MCI | 33 | 8.61 | 1.83 |
| relogio | CTL | 77 | 9.29 | 1.20 |
| TAU | AD | 13 | 632.00 | 268.36 |
| TAU | MCI | 33 | 444.21 | 193.50 |
| TAU | CTL | 77 | 354.87 | 193.47 |
| TAU_AB1_42_ratio | AD | 13 | 2.20 | 0.54 |
| TAU_AB1_42_ratio | MCI | 33 | 1.60 | 1.40 |
| TAU_AB1_42_ratio | CTL | 77 | 0.79 | 0.60 |
| TAU_AB1_ratio | AD | 13 | 13027.12 | 6837.97 |
| TAU_AB1_ratio | MCI | 33 | 7216.44 | 6806.68 |
| TAU_AB1_ratio | CTL | 77 | 3429.86 | 3571.77 |
| TMT B-A | AD | 13 | 226.69 | 129.06 |
| TMT B-A | MCI | 33 | 129.73 | 104.33 |
| TMT B-A | CTL | 77 | 58.53 | 47.86 |
K:
Avg Thickness:
S:
I:
| morphological_parameter | clinical_test | t | df | Correlation | eff.size | eff.size.conf.low | eff.size.conf.high | ROI | Age_correction | pval.adj |
|---|---|---|---|---|---|---|---|---|---|---|
| K | A7/A5 | 5.800 | 240 | 0.350 | 0.75 | 0.37 | 1.10 | Hemisphere | no | 0.000 |
| K | COGNITIVE_INDEX | 6.700 | 240 | 0.400 | 0.87 | 0.48 | 1.30 | Hemisphere | no | 0.000 |
| K | DIGIT SPAN BACK | 4.100 | 240 | 0.250 | 0.52 | 0.15 | 0.89 | Hemisphere | no | 0.000 |
| K | relogio | 0.180 | 240 | 0.012 | 0.02 | -0.33 | 0.38 | Hemisphere | no | 1.000 |
| K | TMT B-A | -4.800 | 240 | -0.290 | -0.61 | -0.98 | -0.23 | Hemisphere | no | 0.000 |
| K | A7/A5 | 4.300 | 240 | 0.270 | 0.56 | 0.19 | 0.93 | Hemisphere | yes | 0.000 |
| K | COGNITIVE_INDEX | 5.100 | 240 | 0.310 | 0.65 | 0.28 | 1.00 | Hemisphere | yes | 0.000 |
| K | DIGIT SPAN BACK | 3.300 | 240 | 0.200 | 0.41 | 0.04 | 0.77 | Hemisphere | yes | 0.005 |
| K | relogio | -0.790 | 240 | -0.051 | -0.10 | -0.46 | 0.26 | Hemisphere | yes | 1.000 |
| K | TMT B-A | -3.300 | 240 | -0.210 | -0.43 | -0.80 | -0.06 | Hemisphere | yes | 0.004 |
| logAvgThickness | A7/A5 | 6.700 | 240 | 0.390 | 0.85 | 0.46 | 1.20 | Hemisphere | no | 0.000 |
| logAvgThickness | COGNITIVE_INDEX | 6.800 | 240 | 0.400 | 0.87 | 0.48 | 1.30 | Hemisphere | no | 0.000 |
| logAvgThickness | DIGIT SPAN BACK | 3.200 | 240 | 0.200 | 0.41 | 0.04 | 0.77 | Hemisphere | no | 0.005 |
| logAvgThickness | relogio | 1.500 | 240 | 0.099 | 0.20 | -0.16 | 0.56 | Hemisphere | no | 0.982 |
| logAvgThickness | TMT B-A | -3.500 | 240 | -0.220 | -0.45 | -0.82 | -0.08 | Hemisphere | no | 0.002 |
| logAvgThickness | A7/A5 | 4.200 | 240 | 0.260 | 0.54 | 0.17 | 0.91 | Hemisphere | yes | 0.000 |
| logAvgThickness | COGNITIVE_INDEX | 4.200 | 240 | 0.260 | 0.54 | 0.17 | 0.91 | Hemisphere | yes | 0.000 |
| logAvgThickness | DIGIT SPAN BACK | 1.800 | 240 | 0.110 | 0.22 | -0.14 | 0.58 | Hemisphere | yes | 0.311 |
| logAvgThickness | relogio | 0.044 | 240 | 0.003 | 0.01 | -0.35 | 0.36 | Hemisphere | yes | 1.000 |
| logAvgThickness | TMT B-A | -1.100 | 240 | -0.069 | -0.14 | -0.50 | 0.22 | Hemisphere | yes | 1.000 |
| K | AB1-40 | -0.760 | 94 | -0.078 | -0.16 | -0.73 | 0.42 | Hemisphere | no | 0.894 |
| K | AB1-42 | 2.500 | 94 | 0.250 | 0.52 | -0.07 | 1.10 | Hemisphere | no | 0.031 |
| K | AB1_ratio | 1.700 | 94 | 0.180 | 0.37 | -0.22 | 0.95 | Hemisphere | no | 0.167 |
| K | Lipoxina | 0.850 | 92 | 0.088 | 0.18 | -0.40 | 0.76 | Hemisphere | no | 0.795 |
| K | TAU | -2.600 | 94 | -0.260 | -0.54 | -1.10 | 0.05 | Hemisphere | no | 0.023 |
| K | TAU_AB1_42_ratio | -3.200 | 94 | -0.310 | -0.65 | -1.20 | -0.05 | Hemisphere | no | 0.004 |
| K | TAU_AB1_ratio | -2.800 | 94 | -0.280 | -0.58 | -1.20 | 0.01 | Hemisphere | no | 0.011 |
| K | AB1-40 | -0.380 | 94 | -0.039 | -0.08 | -0.65 | 0.49 | Hemisphere | yes | 1.000 |
| K | AB1-42 | 2.200 | 94 | 0.220 | 0.45 | -0.14 | 1.00 | Hemisphere | yes | 0.064 |
| K | AB1_ratio | 1.400 | 94 | 0.150 | 0.30 | -0.27 | 0.88 | Hemisphere | yes | 0.317 |
| K | Lipoxina | 1.000 | 92 | 0.110 | 0.22 | -0.36 | 0.80 | Hemisphere | yes | 0.594 |
| K | TAU | -1.700 | 94 | -0.170 | -0.35 | -0.93 | 0.24 | Hemisphere | yes | 0.177 |
| K | TAU_AB1_42_ratio | -2.200 | 94 | -0.220 | -0.45 | -1.00 | 0.14 | Hemisphere | yes | 0.062 |
| K | TAU_AB1_ratio | -1.900 | 94 | -0.190 | -0.39 | -0.97 | 0.20 | Hemisphere | yes | 0.116 |
| logAvgThickness | AB1-40 | -2.100 | 94 | -0.210 | -0.43 | -1.00 | 0.16 | Hemisphere | no | 0.076 |
| logAvgThickness | AB1-42 | 0.840 | 94 | 0.086 | 0.17 | -0.40 | 0.75 | Hemisphere | no | 0.805 |
| logAvgThickness | AB1_ratio | 2.000 | 94 | 0.200 | 0.41 | -0.18 | 0.99 | Hemisphere | no | 0.104 |
| logAvgThickness | Lipoxina | -0.510 | 92 | -0.053 | -0.11 | -0.68 | 0.47 | Hemisphere | no | 1.000 |
| logAvgThickness | TAU | -4.300 | 94 | -0.410 | -0.90 | -1.50 | -0.27 | Hemisphere | no | 0.000 |
| logAvgThickness | TAU_AB1_42_ratio | -3.500 | 94 | -0.340 | -0.72 | -1.30 | -0.12 | Hemisphere | no | 0.001 |
| logAvgThickness | TAU_AB1_ratio | -4.000 | 94 | -0.380 | -0.82 | -1.40 | -0.20 | Hemisphere | no | 0.000 |
| logAvgThickness | AB1-40 | -1.600 | 94 | -0.160 | -0.32 | -0.90 | 0.26 | Hemisphere | yes | 0.220 |
| logAvgThickness | AB1-42 | -0.018 | 94 | -0.002 | 0.00 | -0.58 | 0.57 | Hemisphere | yes | 1.000 |
| logAvgThickness | AB1_ratio | 1.400 | 94 | 0.140 | 0.28 | -0.29 | 0.86 | Hemisphere | yes | 0.351 |
| logAvgThickness | Lipoxina | -0.380 | 92 | -0.040 | -0.08 | -0.66 | 0.50 | Hemisphere | yes | 1.000 |
| logAvgThickness | TAU | -2.900 | 94 | -0.280 | -0.58 | -1.20 | 0.01 | Hemisphere | yes | 0.011 |
| logAvgThickness | TAU_AB1_42_ratio | -1.600 | 94 | -0.170 | -0.35 | -0.93 | 0.24 | Hemisphere | yes | 0.212 |
| logAvgThickness | TAU_AB1_ratio | -2.400 | 94 | -0.240 | -0.49 | -1.10 | 0.09 | Hemisphere | yes | 0.038 |
| K | DIGIT SPAN BACK | 2.700 | 240 | 0.170 | 0.35 | -0.02 | 0.71 | Frontal lobe | yes | 0.034 |
| K | relogio | -0.570 | 240 | -0.037 | -0.07 | -0.43 | 0.28 | Frontal lobe | yes | 1.000 |
| K | TMT B-A | -1.800 | 240 | -0.120 | -0.24 | -0.60 | 0.12 | Frontal lobe | yes | 0.275 |
| K | DIGIT SPAN BACK | 3.400 | 240 | 0.220 | 0.45 | 0.08 | 0.82 | Frontal lobe | no | 0.003 |
| K | relogio | 0.240 | 240 | 0.016 | 0.03 | -0.33 | 0.39 | Frontal lobe | no | 1.000 |
| K | TMT B-A | -3.100 | 240 | -0.200 | -0.41 | -0.77 | -0.04 | Frontal lobe | no | 0.009 |
| logAvgThickness | DIGIT SPAN BACK | 2.500 | 240 | 0.160 | 0.32 | -0.04 | 0.69 | Frontal lobe | no | 0.057 |
| logAvgThickness | relogio | 2.200 | 240 | 0.140 | 0.28 | -0.08 | 0.64 | Frontal lobe | no | 0.216 |
| logAvgThickness | TMT B-A | -3.300 | 240 | -0.210 | -0.43 | -0.80 | -0.06 | Frontal lobe | no | 0.005 |
| logAvgThickness | DIGIT SPAN BACK | 1.100 | 240 | 0.073 | 0.15 | -0.21 | 0.51 | Frontal lobe | yes | 1.000 |
| logAvgThickness | relogio | 1.000 | 240 | 0.065 | 0.13 | -0.23 | 0.49 | Frontal lobe | yes | 1.000 |
| logAvgThickness | TMT B-A | -1.200 | 240 | -0.075 | -0.15 | -0.51 | 0.21 | Frontal lobe | yes | 0.968 |
| K | relogio | -1.100 | 240 | -0.072 | -0.14 | -0.50 | 0.21 | Parietal lobe | yes | 1.000 |
| K | relogio | -0.260 | 240 | -0.017 | -0.03 | -0.39 | 0.32 | Parietal lobe | no | 1.000 |
| logAvgThickness | relogio | 1.400 | 240 | 0.089 | 0.18 | -0.18 | 0.54 | Parietal lobe | no | 1.000 |
| logAvgThickness | relogio | -0.330 | 240 | -0.021 | -0.04 | -0.40 | 0.32 | Parietal lobe | yes | 1.000 |
| K | relogio | -1.200 | 240 | -0.076 | -0.15 | -0.51 | 0.21 | Occipital lobe | yes | 1.000 |
| K | relogio | -0.670 | 240 | -0.044 | -0.09 | -0.45 | 0.27 | Occipital lobe | no | 1.000 |
| logAvgThickness | relogio | -1.200 | 240 | -0.080 | -0.16 | -0.52 | 0.20 | Occipital lobe | no | 1.000 |
| logAvgThickness | relogio | -2.200 | 240 | -0.140 | -0.28 | -0.64 | 0.08 | Occipital lobe | yes | 0.251 |
| K | A7/A5 | 3.400 | 240 | 0.220 | 0.45 | 0.08 | 0.82 | Temporal lobe | yes | 0.003 |
| K | A7/A5 | 4.900 | 240 | 0.300 | 0.63 | 0.25 | 1.00 | Temporal lobe | no | 0.000 |
| logAvgThickness | A7/A5 | 7.500 | 240 | 0.430 | 0.95 | 0.56 | 1.40 | Temporal lobe | no | 0.000 |
| logAvgThickness | A7/A5 | 5.700 | 240 | 0.340 | 0.72 | 0.34 | 1.10 | Temporal lobe | yes | 0.000 |
K:
Avg Thickness:
S:
I:
# Package loading for the multinomial-modeling section.
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() is preferred for hard dependencies.
require(foreign)
## Loading required package: foreign
require(nnet)
require(reshape2)
## Loading required package: reshape2
##
## Attaching package: 'reshape2'
## The following object is masked from 'package:tidyr':
##
## smiths
library(stargazer)
require(betareg)
## Loading required package: betareg
# Make CTL the reference level so multinom() coefficients are the
# AD-vs-CTL and MCI-vs-CTL contrasts.
#Diagnostic <- factor(dados_hemi_v1$Diagnostic, levels = c("AD", "MCI","CTL"))
dados_hemi_v1$Diagnostic <- relevel(dados_hemi_v1$Diagnostic, "CTL")
# Fixed seed for a reproducible subject-level train/test split.
set.seed(0)
# Subject counts per diagnostic group (one row per subject).
N_diag <- dados_hemi_v1 %>% dplyr::select(SUBJ, Diagnostic) %>% unique() %>% group_by(Diagnostic) %>% summarise(n_DIAG = n_distinct(SUBJ))
dados_hemi_v1_filter <- dados_hemi_v1 %>% dplyr::select(SUBJ, Diagnostic) %>% unique()
# 80% of each group, taken by row position in N_diag.
# NOTE(review): rows are addressed by hard-coded index — this assumes the
# group order is CTL (1), AD (2), MCI (3) after the relevel above; confirm,
# or index by level name instead to make this robust.
N_CTL <- as.double(floor(N_diag[1,2]*0.8))
N_CCL <- as.double(round(N_diag[3,2]*0.8))
N_ALZ <- as.double(round(N_diag[2,2]*0.8))
# NOTE(review): the naming here is inverted relative to use. `test.samples`
# holds the 80% draw per group; `subj.training` is its complement (the 20%);
# the anti_join below then makes train.data the 80% and test.data the 20%
# (97 / 26 subjects, per the counts printed further down).
test.samples <- c(sample(which(dados_hemi_v1_filter$Diagnostic == "AD"), N_ALZ), sample(which(dados_hemi_v1_filter$Diagnostic == "CTL"), N_CTL), sample(which(dados_hemi_v1_filter$Diagnostic == "MCI"), N_CCL))
subj.training <- as_tibble(dados_hemi_v1_filter[-test.samples, ]$SUBJ)
colnames(subj.training) <- c("SUBJ")
# filter(dados_hemi_v1, SUBJ == subj.training)
train.data <- anti_join(dados_hemi_v1, subj.training)
## Joining, by = "SUBJ"
test.data <- semi_join(dados_hemi_v1, subj.training)
## Joining, by = "SUBJ"
#train.data <- dados_hemi_v1[-test.samples, ]
#test.data <- dados_hemi_v1[test.samples, ]
# Exploratory box plots per diagnostic group (CTL / AD / MCI), free y-scales,
# over the full data set.
caret::featurePlot(
y = dados_hemi_v1$Diagnostic,
x = dados_hemi_v1[, c("K", "logAvgThickness", "K_age_decay", "logAvgThickness_age_decay")],
plot = "box",
layout = c(4, 1),
scales = list(x = list(rot = 90), y = list(relation = "free"))
)
# Same view for the three raw morphometric terms K, I, S.
caret::featurePlot(
y = dados_hemi_v1$Diagnostic,
x = dados_hemi_v1[, c("K", "I", "S")],
plot = "box",
layout = c(3, 1),
scales = list(x = list(rot = 90), y = list(relation = "free"))
)
# ...and for their age-decay-corrected counterparts.
caret::featurePlot(
y = dados_hemi_v1$Diagnostic,
x = dados_hemi_v1[, c("K_age_decay", "I_age_decay", "S_age_decay")],
plot = "box",
layout = c(3, 1),
scales = list(x = list(rot = 90), y = list(relation = "free"))
)
# Sanity-check the split: 123 subjects total -> 97 train / 26 test (~80/20).
print(n_distinct(dados_hemi_v1$SUBJ))
## [1] 123
print(n_distinct(train.data$SUBJ))
## [1] 97
print(n_distinct(test.data$SUBJ))
## [1] 26
# ggplot(dados_hemi_v1, aes(x = Diagnostic, y = K, color = Diagnostic, fill = Diagnostic)) +
# geom_violin(trim = FALSE, alpha = 0.4) + geom_jitter() +
# theme_pubr() + stat_compare_means(method = "anova") + labs(caption = paste("N = ", n_distinct(filter(dados_hemi_v1, !is.na(K))$SUBJ)))
#aov1 <- aov(K ~ Diagnostic, dados_hemi_v1)
#TukeyHSD(aov1)
# ggplot(dados_hemi_v1, aes(x = Diagnostic, y = K_age_decay, color = Diagnostic, fill = Diagnostic)) +
# geom_violin(trim = FALSE, alpha = 0.4) + geom_jitter() +
# theme_pubr() + stat_compare_means(method = "anova") + labs(caption = paste("N = ", n_distinct(filter(dados_hemi_v1, !is.na(K_age_decay))$SUBJ)))
#aov2 <- aov(K_age_decay ~ Diagnostic, dados_hemi_v1)
#TukeyHSD(aov2)
#ggplot(dados_hemi_v1, aes(x = Diagnostic, y = logAvgThickness, color = Diagnostic, fill = Diagnostic)) +
#geom_violin(trim = FALSE, alpha = 0.4) + geom_jitter() +
#theme_pubr() + stat_compare_means(method = "anova") + labs(caption = paste("N = ", n_distinct(filter(dados_hemi_v1, !is.na(logAvgThickness))$SUBJ)))
#aov3 <- aov(logAvgThickness ~ Diagnostic, dados_hemi_v1)
#TukeyHSD(aov3)
# ggplot(dados_hemi_v1, aes(x = Diagnostic, y = logAvgThickness_age_decay, color = Diagnostic, fill = Diagnostic)) +
# geom_violin(trim = FALSE, alpha = 0.4) + geom_jitter() +
# theme_pubr() + stat_compare_means(method = "anova") + labs(caption = paste("N = ", n_distinct(filter(dados_hemi_v1, !is.na(logAvgThickness_age_decay))$SUBJ)))
#aov4 <- aov(logAvgThickness_age_decay ~ Diagnostic, dados_hemi_v1)
#TukeyHSD(aov4)
# ggplot(dados_hemi_v1, aes(x = Diagnostic, y = I, color = Diagnostic, fill = Diagnostic)) +
# geom_violin(trim = FALSE, alpha = 0.4) + geom_jitter() +
# theme_pubr() + stat_compare_means(method = "anova") + labs(caption = paste("N = ", n_distinct(filter(dados_hemi_v1, !is.na(logAvgThickness_age_decay))$SUBJ)))
#
# ggplot(dados_hemi_v1, aes(x = Diagnostic, y = I_age_decay, color = Diagnostic, fill = Diagnostic)) +
# geom_violin(trim = FALSE, alpha = 0.4) + geom_jitter() +
# theme_pubr() + stat_compare_means(method = "anova") + labs(caption = paste("N = ", n_distinct(filter(dados_hemi_v1, !is.na(logAvgThickness_age_decay))$SUBJ)))
#
# ggplot(dados_hemi_v1, aes(x = Diagnostic, y = S, color = Diagnostic, fill = Diagnostic)) +
# geom_violin(trim = FALSE, alpha = 0.4) + geom_jitter() +
# theme_pubr() + stat_compare_means(method = "anova") + labs(caption = paste("N = ", n_distinct(filter(dados_hemi_v1, !is.na(logAvgThickness_age_decay))$SUBJ)))
#
# ggplot(dados_hemi_v1, aes(x = Diagnostic, y = S_age_decay, color = Diagnostic, fill = Diagnostic)) +
# geom_violin(trim = FALSE, alpha = 0.4) + geom_jitter() +
# theme_pubr() + stat_compare_means(method = "anova") + labs(caption = paste("N = ", n_distinct(filter(dados_hemi_v1, !is.na(logAvgThickness_age_decay))$SUBJ)))
# Same exploratory box plots, restricted to the training set only.
caret::featurePlot(
y = train.data$Diagnostic,
x = train.data[, c("K", "logAvgThickness", "K_age_decay", "logAvgThickness_age_decay")],
plot = "box",
layout = c(4, 1),
scales = list(x = list(rot = 90), y = list(relation = "free"))
)
# Raw and age-decay-corrected K / I / S, paired on a 3x2 layout.
caret::featurePlot(
y = train.data$Diagnostic,
x = train.data[, c("K", "K_age_decay", "I", "I_age_decay", "S", "S_age_decay")],
plot = "box",
layout = c(3, 2),
scales = list(x = list(rot = 90), y = list(relation = "free"))
)
multinom1 <- multinom(Diagnostic ~ K + Age, data = train.data)
## # weights: 12 (6 variable)
## initial value 213.130784
## iter 10 value 142.733086
## iter 20 value 141.522216
## iter 30 value 135.869701
## iter 40 value 135.204635
## iter 50 value 135.109494
## iter 60 value 135.004106
## iter 70 value 134.949990
## iter 80 value 134.935244
## iter 90 value 134.928278
## iter 100 value 134.925355
## final value 134.925355
## stopped after 100 iterations
multinom2 <- multinom(Diagnostic ~ logAvgThickness + Age, data = train.data)
## # weights: 12 (6 variable)
## initial value 213.130784
## iter 10 value 139.961542
## iter 20 value 139.672869
## final value 137.784561
## converged
multinom10 <- multinom(Diagnostic ~ S + Age, data = train.data)
## # weights: 12 (6 variable)
## initial value 213.130784
## iter 10 value 142.591194
## iter 20 value 142.204890
## iter 30 value 141.270596
## iter 40 value 141.112098
## iter 50 value 141.097199
## final value 141.089795
## converged
multinom11 <- multinom(Diagnostic ~ I + Age, data = train.data)
## # weights: 12 (6 variable)
## initial value 213.130784
## iter 10 value 139.961266
## iter 20 value 139.803800
## iter 30 value 138.051783
## iter 40 value 137.777742
## iter 40 value 137.777742
## final value 137.777742
## converged
multinom0 <- multinom(Diagnostic ~ K + Age + ESC, data = train.data)
## # weights: 15 (8 variable)
## initial value 213.130784
## iter 10 value 134.116498
## iter 20 value 131.453188
## iter 30 value 129.237736
## iter 40 value 124.493246
## iter 50 value 124.078558
## iter 60 value 123.970662
## iter 70 value 123.868622
## iter 80 value 123.801584
## iter 90 value 123.732669
## iter 100 value 123.709843
## final value 123.709843
## stopped after 100 iterations
multinom0_2 <- multinom(Diagnostic ~ logAvgThickness + Age + ESC, data = train.data)
## # weights: 15 (8 variable)
## initial value 213.130784
## iter 10 value 132.749238
## iter 20 value 130.774868
## iter 30 value 129.107343
## iter 40 value 127.724807
## iter 50 value 127.590914
## iter 60 value 127.569589
## iter 70 value 127.554830
## iter 80 value 127.547652
## iter 90 value 127.539117
## iter 100 value 127.535291
## final value 127.535291
## stopped after 100 iterations
multinom_Gender1 <- multinom(Diagnostic ~ K + Age + Gender , data = train.data)
## # weights: 15 (8 variable)
## initial value 213.130784
## iter 10 value 140.087480
## iter 20 value 130.881728
## iter 30 value 128.755223
## iter 40 value 127.574431
## iter 50 value 127.074918
## iter 60 value 126.999856
## iter 70 value 126.824155
## iter 80 value 126.780775
## iter 90 value 126.731951
## iter 100 value 126.700153
## final value 126.700153
## stopped after 100 iterations
multinom_Gender2 <- multinom(Diagnostic ~ logAvgThickness + Age + Gender , data = train.data)
## # weights: 15 (8 variable)
## initial value 213.130784
## iter 10 value 137.019686
## iter 20 value 135.406470
## iter 30 value 134.647746
## iter 40 value 133.996241
## iter 50 value 133.939802
## iter 60 value 133.891576
## iter 70 value 133.853402
## iter 70 value 133.853402
## iter 70 value 133.853400
## final value 133.853400
## converged
multinom0_0 <- multinom(Diagnostic ~ K + S + I + Age, data = train.data)
## # weights: 18 (10 variable)
## initial value 213.130784
## iter 10 value 137.550017
## iter 20 value 132.662085
## iter 30 value 130.676258
## iter 40 value 129.813038
## final value 129.791092
## converged
# anova(multinom2, multinom1, test = "Chisq")
# anova(multinom0, multinom1, test = "Chisq")
#
# anova(multinom0_0, multinom1, test = "Chisq")
## Statistician's analysis section ("da estatistica") ##
# Refit of multinom1 above (Diagnostic ~ K + Age) for the evaluation
# pipeline in this section.  NOTE(review): stopped at maxit = 100 without
# converging — consider maxit = 1000.
m.multi.nova1 <-
multinom(Diagnostic ~ K + Age, data = train.data)
## # weights: 12 (6 variable)
## initial value 213.130784
## iter 10 value 142.733086
## iter 20 value 141.522216
## iter 30 value 135.869701
## iter 40 value 135.204635
## iter 50 value 135.109494
## iter 60 value 135.004106
## iter 70 value 134.949990
## iter 80 value 134.935244
## iter 90 value 134.928278
## iter 100 value 134.925355
## final value 134.925355
## stopped after 100 iterations
stargazer(m.multi.nova1, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## K -73.439*** -14.440
## (3.324) (11.464)
##
## Age 0.258*** 0.118***
## (0.067) (0.032)
##
## Constant -59.859*** -16.673***
## (3.971) (6.078)
##
## ----------------------------------------------
## Akaike Inf. Crit. 281.851 281.851
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics: coefficient / standard error, per contrast (AD, MCI).
z1 <-
summary(m.multi.nova1)$coefficients / summary(m.multi.nova1)$standard.errors
# Two-sided p-values.  pnorm(..., lower.tail = FALSE) replaces the original
# (1 - pnorm(abs(z))) * 2: the subtraction suffers catastrophic cancellation
# and underflows to exactly 0 for large |z| (the rendered table shows
# 0.0000000000); the upper-tail form keeps full floating-point precision.
p1 <- 2 * pnorm(abs(z1), lower.tail = FALSE)
t(p1)
## AD MCI
## (Intercept) 0.0000000000 0.0060857025
## K 0.0000000000 0.2077896026
## Age 0.0001078354 0.0001855143
# Exponentiate coefficients to relative-risk ratios for easier interpretation
# (original comment: "Para facilitar a interpretação").
# Fixed `=` assignment to idiomatic `<-`.
coef.multi1 <- exp(coef(m.multi.nova1))
t(coef.multi1)
## AD MCI
## (Intercept) 1.008650e-26 5.741146e-08
## K 1.275907e-32 5.353058e-07
## Age 1.294764e+00 1.125566e+00
# Predictions ("Previsoes") for the held-out subjects.
predicted.classes.multi.nova1 <- m.multi.nova1 %>% predict(test.data, type = "class")
# Model accuracy: share of correctly classified hemisphere-level test rows.
mean(predicted.classes.multi.nova1 == test.data$Diagnostic)
## [1] 0.7307692
# Full confusion matrix with per-class sensitivity/specificity (caret).
confusionMatrix(predicted.classes.multi.nova1, test.data$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 30 2 8
## AD 0 3 1
## MCI 2 1 5
##
## Overall Statistics
##
## Accuracy : 0.7308
## 95% CI : (0.5898, 0.8443)
## No Information Rate : 0.6154
## P-Value [Acc > NIR] : 0.05604
##
## Kappa : 0.4348
##
## Mcnemar's Test P-Value : 0.13278
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 0.9375 0.50000 0.35714
## Specificity 0.5000 0.97826 0.92105
## Pos Pred Value 0.7500 0.75000 0.62500
## Neg Pred Value 0.8333 0.93750 0.79545
## Prevalence 0.6154 0.11538 0.26923
## Detection Rate 0.5769 0.05769 0.09615
## Detection Prevalence 0.7692 0.07692 0.15385
## Balanced Accuracy 0.7188 0.73913 0.63910
# ROC: multi-class AUC (Hand & Till) computed from the predicted hard labels.
# Fixed `percent = F` -> `percent = FALSE` (T/F are ordinary reassignable
# variables in R, not reserved words).
# NOTE(review): AUC over class labels rather than predicted probabilities
# understates discrimination; consider predict(..., type = "probs").
multiclass.roc(
as.numeric(test.data$Diagnostic),
as.numeric(predicted.classes.multi.nova1),
percent = FALSE,
ci.alpha = 0.9,
stratified = FALSE,
plot = TRUE,
grid = FALSE,
legacy.axes = TRUE,
reuse.auc = TRUE,
print.auc = TRUE,
print.thres.col = "blue",
ci.type = "bars",
print.thres.cex = 0.7,
main = "ROC curve",
ylab = "Sensitivity (true positive rate)",
xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls > cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova1), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = FALSE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova1) with 3 levels of as.numeric(test.data$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.6677
# Thickness model (Diagnostic ~ logAvgThickness + Age); converged.
m.multi.nova2 <-
multinom(Diagnostic ~ logAvgThickness + Age, data = train.data)
## # weights: 12 (6 variable)
## initial value 213.130784
## iter 10 value 139.961542
## iter 20 value 139.672869
## final value 137.784561
## converged
stargazer(m.multi.nova2, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## logAvgThickness -48.394*** -22.859**
## (3.286) (11.304)
##
## Age 0.304*** 0.102***
## (0.066) (0.032)
##
## Constant -5.225 1.039
## (4.483) (5.520)
##
## ----------------------------------------------
## Akaike Inf. Crit. 287.569 287.569
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics per contrast (AD, MCI vs CTL).
z2 <-
summary(m.multi.nova2)$coefficients / summary(m.multi.nova2)$standard.errors
# Two-sided p-values via the upper tail directly; (1 - pnorm(...)) underflows
# to exactly 0 for large |z| (catastrophic cancellation).
p2 <- 2 * pnorm(abs(z2), lower.tail = FALSE)
t(p2)
## AD MCI
## (Intercept) 2.437921e-01 0.850659019
## logAvgThickness 0.000000e+00 0.043151577
## Age 4.609332e-06 0.001588328
# Relative-risk ratios ("Para facilitar a interpretação").
# Fixed `=` assignment to idiomatic `<-`.
coef.multi2 <- exp(coef(m.multi.nova2))
t(coef.multi2)
## AD MCI
## (Intercept) 5.380382e-03 2.827298e+00
## logAvgThickness 9.606729e-22 1.181233e-10
## Age 1.355814e+00 1.107784e+00
# Predictions ("Previsoes") for the held-out subjects.
predicted.classes.multi.nova2 <- m.multi.nova2 %>% predict(test.data, type = "class")
# Test-set accuracy of the thickness model.
mean(predicted.classes.multi.nova2 == test.data$Diagnostic)
## [1] 0.6730769
# Confusion matrix with per-class statistics (caret).
confusionMatrix(predicted.classes.multi.nova2, test.data$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 30 2 8
## AD 0 3 4
## MCI 2 1 2
##
## Overall Statistics
##
## Accuracy : 0.6731
## 95% CI : (0.5289, 0.7967)
## No Information Rate : 0.6154
## P-Value [Acc > NIR] : 0.23993
##
## Kappa : 0.3262
##
## Mcnemar's Test P-Value : 0.06018
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 0.9375 0.50000 0.14286
## Specificity 0.5000 0.91304 0.92105
## Pos Pred Value 0.7500 0.42857 0.40000
## Neg Pred Value 0.8333 0.93333 0.74468
## Prevalence 0.6154 0.11538 0.26923
## Detection Rate 0.5769 0.05769 0.03846
## Detection Prevalence 0.7692 0.13462 0.09615
## Balanced Accuracy 0.7188 0.70652 0.53195
# Multi-class ROC/AUC for the thickness model on hard labels.
# Fixed `percent = F` -> `percent = FALSE` (T/F are reassignable variables).
multiclass.roc(
as.numeric(test.data$Diagnostic),
as.numeric(predicted.classes.multi.nova2),
percent = FALSE,
ci.alpha = 0.9,
stratified = FALSE,
plot = TRUE,
grid = TRUE,
legacy.axes = TRUE,
reuse.auc = TRUE,
print.auc = TRUE,
print.thres.col = "blue",
ci.type = "bars",
print.thres.cex = 0.7,
main = "ROC curve",
ylab = "Sensitivity (true positive rate)",
xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls > cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova2), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova2) with 3 levels of as.numeric(test.data$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.6892
# I + Age model.
# NOTE(review): despite the "10" suffix this fits I, whereas multinom10 in
# the earlier section fit S — the 10/11 numbering is swapped between sections.
m.multi.nova10 <-
multinom(Diagnostic ~ I + Age, data = train.data)
## # weights: 12 (6 variable)
## initial value 213.130784
## iter 10 value 139.961266
## iter 20 value 139.803800
## iter 30 value 138.051783
## iter 40 value 137.777742
## iter 40 value 137.777742
## final value 137.777742
## converged
stargazer(m.multi.nova10, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## I -10.525*** -1.937***
## (0.471) (0.205)
##
## Age 0.314*** 0.118***
## (0.065) (0.030)
##
## Constant 84.006*** 11.037***
## (0.045) (0.019)
##
## ----------------------------------------------
## Akaike Inf. Crit. 287.555 287.555
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics per contrast (AD, MCI vs CTL).
z10 <-
summary(m.multi.nova10)$coefficients / summary(m.multi.nova10)$standard.errors
# Two-sided p-values via the upper tail directly; (1 - pnorm(...)) underflows
# to exactly 0 for large |z| — which is why the rendered output shows
# 0.000000e+00 rather than the true tiny p-values.
p10 <- 2 * pnorm(abs(z10), lower.tail = FALSE)
t(p10)
## AD MCI
## (Intercept) 0.000000e+00 0.000000e+00
## I 0.000000e+00 0.000000e+00
## Age 1.458784e-06 9.671853e-05
# Relative-risk ratios ("Para facilitar a interpretação").
# Fixed `=` assignment to idiomatic `<-`.
coef.multi10 <- exp(coef(m.multi.nova10))
t(coef.multi10)
## AD MCI
## (Intercept) 3.042939e+36 6.215508e+04
## I 2.686391e-05 1.441781e-01
## Age 1.368984e+00 1.125051e+00
# Predictions ("Previsoes") for the held-out subjects.
predicted.classes.multi.nova10 <- m.multi.nova10 %>% predict(test.data, type = "class")
# Test-set accuracy of the I model.
mean(predicted.classes.multi.nova10 == test.data$Diagnostic)
## [1] 0.6538462
# Confusion matrix with per-class statistics (caret).
confusionMatrix(predicted.classes.multi.nova10, test.data$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 30 2 8
## AD 0 2 4
## MCI 2 2 2
##
## Overall Statistics
##
## Accuracy : 0.6538
## 95% CI : (0.5091, 0.7803)
## No Information Rate : 0.6154
## P-Value [Acc > NIR] : 0.33801
##
## Kappa : 0.2822
##
## Mcnemar's Test P-Value : 0.09933
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 0.9375 0.33333 0.14286
## Specificity 0.5000 0.91304 0.89474
## Pos Pred Value 0.7500 0.33333 0.33333
## Neg Pred Value 0.8333 0.91304 0.73913
## Prevalence 0.6154 0.11538 0.26923
## Detection Rate 0.5769 0.03846 0.03846
## Detection Prevalence 0.7692 0.11538 0.11538
## Balanced Accuracy 0.7188 0.62319 0.51880
# Multi-class ROC/AUC for the I model on hard labels.
# Fixed `percent = F` -> `percent = FALSE` (T/F are reassignable variables).
multiclass.roc(
as.numeric(test.data$Diagnostic),
as.numeric(predicted.classes.multi.nova10),
percent = FALSE,
ci.alpha = 0.9,
stratified = FALSE,
plot = TRUE,
grid = TRUE,
legacy.axes = TRUE,
reuse.auc = TRUE,
print.auc = TRUE,
print.thres.col = "blue",
ci.type = "bars",
print.thres.cex = 0.7,
main = "ROC curve",
ylab = "Sensitivity (true positive rate)",
xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls > cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova10), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova10) with 3 levels of as.numeric(test.data$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.7029
# S + Age model.
# NOTE(review): the "11" suffix here is the S model, while multinom11 in the
# earlier section fit I — numbering swapped between sections.
m.multi.nova11 <-
multinom(Diagnostic ~ S + Age, data = train.data)
## # weights: 12 (6 variable)
## initial value 213.130784
## iter 10 value 142.591194
## iter 20 value 142.204890
## iter 30 value 141.270596
## iter 40 value 141.112098
## iter 50 value 141.097199
## final value 141.089795
## converged
stargazer(m.multi.nova10, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## I -10.525*** -1.937***
## (0.471) (0.205)
##
## Age 0.314*** 0.118***
## (0.065) (0.030)
##
## Constant 84.006*** 11.037***
## (0.045) (0.019)
##
## ----------------------------------------------
## Akaike Inf. Crit. 287.555 287.555
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics per contrast (AD, MCI vs CTL).
z11 <-
summary(m.multi.nova11)$coefficients / summary(m.multi.nova11)$standard.errors
# Two-sided p-values via the upper tail directly; (1 - pnorm(...)) underflows
# to exactly 0 for large |z| (catastrophic cancellation).
p11 <- 2 * pnorm(abs(z11), lower.tail = FALSE)
t(p11)
## AD MCI
## (Intercept) 3.033129e-13 0.0927816399
## S 2.726861e-01 0.3424980727
## Age 2.452440e-07 0.0001226172
# Relative-risk ratios ("Para facilitar a interpretação").
# Fixed `=` assignment to idiomatic `<-`.
coef.multi11 <- exp(coef(m.multi.nova11))
t(coef.multi11)
## AD MCI
## (Intercept) 3.073631e-15 5.567407e-10
## S 2.193593e+00 3.830310e+00
## Age 1.399446e+00 1.125321e+00
# Predictions ("Previsoes") for the held-out subjects.
predicted.classes.multi.nova11 <- m.multi.nova11 %>% predict(test.data, type = "class")
# Test-set accuracy of the S model.
mean(predicted.classes.multi.nova11 == test.data$Diagnostic)
## [1] 0.6538462
# Confusion matrix with per-class statistics (caret).
confusionMatrix(predicted.classes.multi.nova11, test.data$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 30 2 8
## AD 2 2 4
## MCI 0 2 2
##
## Overall Statistics
##
## Accuracy : 0.6538
## 95% CI : (0.5091, 0.7803)
## No Information Rate : 0.6154
## P-Value [Acc > NIR] : 0.33801
##
## Kappa : 0.2909
##
## Mcnemar's Test P-Value : 0.03407
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 0.9375 0.33333 0.14286
## Specificity 0.5000 0.86957 0.94737
## Pos Pred Value 0.7500 0.25000 0.50000
## Neg Pred Value 0.8333 0.90909 0.75000
## Prevalence 0.6154 0.11538 0.26923
## Detection Rate 0.5769 0.03846 0.03846
## Detection Prevalence 0.7692 0.15385 0.07692
## Balanced Accuracy 0.7188 0.60145 0.54511
# Multi-class ROC/AUC for the S model on hard labels.
# Fixed `percent = F` -> `percent = FALSE` (T/F are reassignable variables).
multiclass.roc(
as.numeric(test.data$Diagnostic),
as.numeric(predicted.classes.multi.nova11),
percent = FALSE,
ci.alpha = 0.9,
stratified = FALSE,
plot = TRUE,
grid = TRUE,
legacy.axes = TRUE,
reuse.auc = TRUE,
print.auc = TRUE,
print.thres.col = "blue",
ci.type = "bars",
print.thres.cex = 0.7,
main = "ROC curve",
ylab = "Sensitivity (true positive rate)",
xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls > cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova11), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova11) with 3 levels of as.numeric(test.data$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.7143
# K + Age + ESC (schooling) model; stopped at maxit = 100 without converging
# per the iteration log below.
m.multi.nova0 <-
multinom(Diagnostic ~ K + Age + ESC, data = train.data)
## # weights: 15 (8 variable)
## initial value 213.130784
## iter 10 value 134.116498
## iter 20 value 131.453188
## iter 30 value 129.237736
## iter 40 value 124.493246
## iter 50 value 124.078558
## iter 60 value 123.970662
## iter 70 value 123.868622
## iter 80 value 123.801584
## iter 90 value 123.732669
## iter 100 value 123.709843
## final value 123.709843
## stopped after 100 iterations
stargazer(m.multi.nova0, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## K -91.970*** -15.980
## (3.101) (11.961)
##
## Age 0.205*** 0.088***
## (0.071) (0.032)
##
## ESC -0.569*** -0.262***
## (0.135) (0.083)
##
## Constant -58.276*** -11.529*
## (4.506) (6.243)
##
## ----------------------------------------------
## Akaike Inf. Crit. 263.420 263.420
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics per contrast (AD, MCI vs CTL).
z0 <-
summary(m.multi.nova0)$coefficients / summary(m.multi.nova0)$standard.errors
# Two-sided p-values via the upper tail directly; (1 - pnorm(...)) underflows
# to exactly 0 for large |z| (catastrophic cancellation).
p0 <- 2 * pnorm(abs(z0), lower.tail = FALSE)
t(p0)
## AD MCI
## (Intercept) 0.000000e+00 0.064771820
## K 0.000000e+00 0.181567362
## Age 3.777400e-03 0.005543926
## ESC 2.548523e-05 0.001664734
# Relative-risk ratios ("Para facilitar a interpretação").
# Fixed `=` assignment to idiomatic `<-`.
coef.multi0 <- exp(coef(m.multi.nova0))
t(coef.multi0)
## AD MCI
## (Intercept) 4.910548e-26 9.840553e-06
## K 1.142372e-40 1.148650e-07
## Age 1.226948e+00 1.091918e+00
## ESC 5.661688e-01 7.698463e-01
# Predictions ("Previsoes") for the held-out subjects.
predicted.classes.multi.nova0 <- m.multi.nova0 %>% predict(test.data, type = "class")
# Test-set accuracy of the K + ESC model.
mean(predicted.classes.multi.nova0 == test.data$Diagnostic)
## [1] 0.7115385
# Confusion matrix with per-class statistics (caret).
confusionMatrix(predicted.classes.multi.nova0, test.data$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 27 2 2
## AD 0 1 3
## MCI 5 3 9
##
## Overall Statistics
##
## Accuracy : 0.7115
## 95% CI : (0.5692, 0.8287)
## No Information Rate : 0.6154
## P-Value [Acc > NIR] : 0.09824
##
## Kappa : 0.4621
##
## Mcnemar's Test P-Value : 0.34964
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 0.8438 0.16667 0.6429
## Specificity 0.8000 0.93478 0.7895
## Pos Pred Value 0.8710 0.25000 0.5294
## Neg Pred Value 0.7619 0.89583 0.8571
## Prevalence 0.6154 0.11538 0.2692
## Detection Rate 0.5192 0.01923 0.1731
## Detection Prevalence 0.5962 0.07692 0.3269
## Balanced Accuracy 0.8219 0.55072 0.7162
# Multi-class ROC/AUC for the K + ESC model on hard labels.
# Fixed `percent = F` -> `percent = FALSE` (T/F are reassignable variables).
multiclass.roc(
as.numeric(test.data$Diagnostic),
as.numeric(predicted.classes.multi.nova0),
percent = FALSE,
ci.alpha = 0.9,
stratified = FALSE,
plot = TRUE,
grid = TRUE,
legacy.axes = TRUE,
reuse.auc = TRUE,
print.auc = TRUE,
print.thres.col = "blue",
ci.type = "bars",
print.thres.cex = 0.7,
main = "ROC curve",
ylab = "Sensitivity (true positive rate)",
xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova0), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova0) with 3 levels of as.numeric(test.data$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.7237
# Thickness + Age + ESC (schooling) model; stopped at maxit = 100 without
# converging per the iteration log below.
m.multi.nova0_2 <-
multinom(Diagnostic ~ logAvgThickness + Age + ESC, data = train.data)
## # weights: 15 (8 variable)
## initial value 213.130784
## iter 10 value 132.749238
## iter 20 value 130.774868
## iter 30 value 129.107343
## iter 40 value 127.724807
## iter 50 value 127.590914
## iter 60 value 127.569589
## iter 70 value 127.554830
## iter 80 value 127.547652
## iter 90 value 127.539117
## iter 100 value 127.535291
## final value 127.535291
## stopped after 100 iterations
stargazer(m.multi.nova0_2, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## logAvgThickness -67.690*** -17.561
## (2.917) (11.642)
##
## Age 0.223*** 0.082**
## (0.065) (0.032)
##
## ESC -0.510*** -0.256***
## (0.127) (0.082)
##
## Constant 15.068*** 4.165
## (4.478) (5.908)
##
## ----------------------------------------------
## Akaike Inf. Crit. 271.071 271.071
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics per contrast (AD, MCI vs CTL).
z0_2 <-
summary(m.multi.nova0_2)$coefficients / summary(m.multi.nova0_2)$standard.errors
# Two-sided p-values via the upper tail directly; (1 - pnorm(...)) underflows
# to exactly 0 for large |z| (catastrophic cancellation).
p0_2 <- 2 * pnorm(abs(z0_2), lower.tail = FALSE)
t(p0_2)
## AD MCI
## (Intercept) 7.649503e-04 0.480802604
## logAvgThickness 0.000000e+00 0.131473759
## Age 6.431381e-04 0.011507258
## ESC 5.683326e-05 0.001850566
#Para facilitar a interpreta??o:
coef.multi0_2 = exp(coef(m.multi.nova0_2))
t(coef.multi0_2)
## AD MCI
## (Intercept) 3.498279e+06 6.442058e+01
## logAvgThickness 4.006028e-30 2.363531e-08
## Age 1.249800e+00 1.085065e+00
## ESC 6.004399e-01 7.739285e-01
#Previsoes
predicted.classes.multi.nova0_2 <- m.multi.nova0_2 %>% predict(test.data, type = "class")
#Model accuracy
mean(predicted.classes.multi.nova0_2 == test.data$Diagnostic)
## [1] 0.6538462
# Summary
confusionMatrix(predicted.classes.multi.nova0_2, test.data$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 26 2 3
## AD 0 1 4
## MCI 6 3 7
##
## Overall Statistics
##
## Accuracy : 0.6538
## 95% CI : (0.5091, 0.7803)
## No Information Rate : 0.6154
## P-Value [Acc > NIR] : 0.3380
##
## Kappa : 0.358
##
## Mcnemar's Test P-Value : 0.3701
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 0.8125 0.16667 0.5000
## Specificity 0.7500 0.91304 0.7632
## Pos Pred Value 0.8387 0.20000 0.4375
## Neg Pred Value 0.7143 0.89362 0.8056
## Prevalence 0.6154 0.11538 0.2692
## Detection Rate 0.5000 0.01923 0.1346
## Detection Prevalence 0.5962 0.09615 0.3077
## Balanced Accuracy 0.7812 0.53986 0.6316
# ROC: one-vs-one multiclass AUC (pROC) of predicted vs. true diagnosis.
multiclass.roc(
  as.numeric(test.data$Diagnostic),
  as.numeric(predicted.classes.multi.nova0_2),
  percent = FALSE, ci.alpha = 0.9, stratified = FALSE,
  plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE,
  print.auc = TRUE, print.thres.col = "blue", ci.type = "bars",
  print.thres.cex = 0.7, main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova0_2), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova0_2) with 3 levels of as.numeric(test.data$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.6753
# Multinomial logit: diagnosis from curvature summaries (K, I, S) plus age.
# The trace below shows this fit converged within the default iteration limit.
m.multi.nova0_0 <-
multinom(Diagnostic ~ K + I + S + Age, data = train.data)
## # weights: 18 (10 variable)
## initial value 213.130784
## iter 10 value 137.550017
## iter 20 value 132.662085
## iter 30 value 130.676258
## iter 40 value 129.813038
## final value 129.791092
## converged
stargazer(m.multi.nova0_0, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## K -80.489*** -12.737
## (2.152) (11.995)
##
## I -11.731*** -2.646
## (3.051) (1.614)
##
## S 3.328 1.939
## (3.488) (1.967)
##
## Age 0.236*** 0.107***
## (0.067) (0.033)
##
## Constant 28.518*** -5.376***
## (0.269) (1.117)
##
## ----------------------------------------------
## Akaike Inf. Crit. 279.582 279.582
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics and two-sided p-values for each coefficient.
# FIX: redundant parentheses removed (consistency with the sibling z
# computations) and the p-value uses pnorm(lower.tail = FALSE) instead of
# (1 - pnorm(abs(z))) * 2, which underflows to 0 for large |z|.
z0_0 <-
  summary(m.multi.nova0_0)$coefficients / summary(m.multi.nova0_0)$standard.errors
p0_0 <- 2 * pnorm(abs(z0_0), lower.tail = FALSE)
t(p0_0)
## AD MCI
## (Intercept) 0.0000000000 1.489861e-06
## K 0.0000000000 2.882753e-01
## I 0.0001205562 1.011878e-01
## S 0.3399794339 3.243309e-01
## Age 0.0004607455 1.048672e-03
# Exponentiate the log-odds coefficients into odds ratios for interpretation.
coef.multi0_0 <- exp(coef(m.multi.nova0_0))
t(coef.multi0_0)
## AD MCI
## (Intercept) 2.427005e+12 4.626382e-03
## K 1.107225e-35 2.938945e-06
## I 8.036782e-06 7.092752e-02
## S 2.787740e+01 6.948562e+00
## Age 1.266412e+00 1.113474e+00
# Predicted classes on the held-out test set
predicted.classes.multi.nova0_0 <- predict(m.multi.nova0_0, test.data, type = "class")
# Model accuracy
mean(predicted.classes.multi.nova0_0 == test.data$Diagnostic)
## [1] 0.7115385
# Confusion matrix and per-class statistics
confusionMatrix(predicted.classes.multi.nova0_0, test.data$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 30 2 8
## AD 0 3 2
## MCI 2 1 4
##
## Overall Statistics
##
## Accuracy : 0.7115
## 95% CI : (0.5692, 0.8287)
## No Information Rate : 0.6154
## P-Value [Acc > NIR] : 0.09824
##
## Kappa : 0.3981
##
## Mcnemar's Test P-Value : 0.11490
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 0.9375 0.50000 0.28571
## Specificity 0.5000 0.95652 0.92105
## Pos Pred Value 0.7500 0.60000 0.57143
## Neg Pred Value 0.8333 0.93617 0.77778
## Prevalence 0.6154 0.11538 0.26923
## Detection Rate 0.5769 0.05769 0.07692
## Detection Prevalence 0.7692 0.09615 0.13462
## Balanced Accuracy 0.7188 0.72826 0.60338
# ROC: one-vs-one multiclass AUC (pROC) for the K + I + S + Age model.
# FIX: xlab read "0_0-Specificity ..." — a find/replace accident on "1";
# restored to the axis label used by every other ROC plot in this file.
multiclass.roc(
  as.numeric(test.data$Diagnostic),
  as.numeric(predicted.classes.multi.nova0_0),
  percent = FALSE, ci.alpha = 0.9, stratified = FALSE,
  plot = TRUE, grid = FALSE, legacy.axes = TRUE, reuse.auc = TRUE,
  print.auc = TRUE, print.thres.col = "blue", ci.type = "bars",
  print.thres.cex = 0.7, main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls > cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova0_0), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = FALSE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "0_0-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova0_0) with 3 levels of as.numeric(test.data$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.6749
# Multinomial logit: diagnosis from intrinsic curvature K, age and gender.
# FIX: maxit raised from the nnet default of 100 — the original run reported
# "stopped after 100 iterations", i.e. the optimizer had NOT converged.
m.multi.nova.Gender1 <-
  multinom(Diagnostic ~ K + Age + Gender, data = train.data, maxit = 1000)
## # weights: 15 (8 variable)
## initial value 213.130784
## iter 10 value 140.087480
## iter 20 value 130.881728
## iter 30 value 128.755223
## iter 40 value 127.574431
## iter 50 value 127.074918
## iter 60 value 126.999856
## iter 70 value 126.824155
## iter 80 value 126.780775
## iter 90 value 126.731951
## iter 100 value 126.700153
## final value 126.700153
## stopped after 100 iterations
stargazer(m.multi.nova.Gender1, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## K -111.920*** -9.049
## (2.623) (12.637)
##
## Age 0.323*** 0.115***
## (0.071) (0.031)
##
## GenderMASC -2.505*** 0.654*
## (0.824) (0.370)
##
## Constant -84.715*** -13.820**
## (4.028) (6.553)
##
## ----------------------------------------------
## Akaike Inf. Crit. 269.400 269.400
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics and two-sided p-values for each coefficient.
# FIX: pnorm(lower.tail = FALSE) avoids the cancellation in
# (1 - pnorm(abs(z))) * 2, which underflows to exactly 0 for large |z|
# (see the 0.000000e+00 entries in the recorded output below).
z.Gender1 <-
  summary(m.multi.nova.Gender1)$coefficients / summary(m.multi.nova.Gender1)$standard.errors
p.Gender1 <- 2 * pnorm(abs(z.Gender1), lower.tail = FALSE)
t(p.Gender1)
## AD MCI
## (Intercept) 0.000000e+00 0.0349452344
## K 0.000000e+00 0.4739590711
## Age 5.027215e-06 0.0002724502
## GenderMASC 2.353488e-03 0.0770532890
# Exponentiate the log-odds coefficients into odds ratios for interpretation.
coef.multi.Gender1 <- exp(coef(m.multi.nova.Gender1))
t(coef.multi.Gender1)
## AD MCI
## (Intercept) 1.617077e-37 9.952089e-07
## K 2.475032e-49 1.175610e-04
## Age 1.380929e+00 1.121411e+00
## GenderMASC 8.164061e-02 1.923854e+00
# Predicted classes on the held-out test set
predicted.classes.multi.nova.Gender1 <- predict(m.multi.nova.Gender1, test.data, type = "class")
# Model accuracy
mean(predicted.classes.multi.nova.Gender1 == test.data$Diagnostic)
## [1] 0.6730769
# Confusion matrix and per-class statistics
confusionMatrix(predicted.classes.multi.nova.Gender1, test.data$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 30 2 8
## AD 2 3 4
## MCI 0 1 2
##
## Overall Statistics
##
## Accuracy : 0.6731
## 95% CI : (0.5289, 0.7967)
## No Information Rate : 0.6154
## P-Value [Acc > NIR] : 0.23993
##
## Kappa : 0.3343
##
## Mcnemar's Test P-Value : 0.02034
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 0.9375 0.50000 0.14286
## Specificity 0.5000 0.86957 0.97368
## Pos Pred Value 0.7500 0.33333 0.66667
## Neg Pred Value 0.8333 0.93023 0.75510
## Prevalence 0.6154 0.11538 0.26923
## Detection Rate 0.5769 0.05769 0.03846
## Detection Prevalence 0.7692 0.17308 0.05769
## Balanced Accuracy 0.7188 0.68478 0.55827
# ROC: one-vs-one multiclass AUC (pROC) for the K + Age + Gender model.
multiclass.roc(
  as.numeric(test.data$Diagnostic),
  as.numeric(predicted.classes.multi.nova.Gender1),
  percent = FALSE, ci.alpha = 0.9, stratified = FALSE,
  plot = TRUE, grid = FALSE, legacy.axes = TRUE, reuse.auc = TRUE,
  print.auc = TRUE, print.thres.col = "blue", ci.type = "bars",
  print.thres.cex = 0.7, main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls > cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova.Gender1), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = FALSE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova.Gender1) with 3 levels of as.numeric(test.data$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.7006
# Multinomial logit: age-decay-corrected thickness, age and gender.
# The trace below shows this fit converged.
m.multi.nova.Gender2 <-
multinom(Diagnostic ~ logAvgThickness_age_decay + Age + Gender, data = train.data)
## # weights: 15 (8 variable)
## initial value 213.130784
## iter 10 value 137.089403
## iter 20 value 135.484139
## iter 30 value 134.746706
## iter 40 value 133.964197
## iter 50 value 133.911487
## iter 60 value 133.880723
## iter 70 value 133.845886
## iter 70 value 133.845886
## final value 133.845882
## converged
stargazer(m.multi.nova.Gender2, type = "text")
##
## ======================================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ------------------------------------------------------
## logAvgThickness_age_decay -57.735*** -18.876
## (3.515) (11.642)
##
## Age 0.350*** 0.117***
## (0.068) (0.030)
##
## GenderMASC -0.831 0.702*
## (0.696) (0.364)
##
## Constant -2.254 -1.055
## (4.556) (5.441)
##
## ------------------------------------------------------
## Akaike Inf. Crit. 283.692 283.692
## ======================================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics and two-sided p-values for each coefficient.
# FIX: pnorm(lower.tail = FALSE) avoids the cancellation in
# (1 - pnorm(abs(z))) * 2, which underflows to exactly 0 for large |z|.
z.Gender2 <-
  summary(m.multi.nova.Gender2)$coefficients / summary(m.multi.nova.Gender2)$standard.errors
p.Gender2 <- 2 * pnorm(abs(z.Gender2), lower.tail = FALSE)
t(p.Gender2)
## AD MCI
## (Intercept) 6.207591e-01 0.84627477
## logAvgThickness_age_decay 0.000000e+00 0.10495102
## Age 3.028346e-07 0.00012565
## GenderMASC 2.322306e-01 0.05375627
# Exponentiate the log-odds coefficients into odds ratios for interpretation.
coef.multi.Gender2 <- exp(coef(m.multi.nova.Gender2))
t(coef.multi.Gender2)
## AD MCI
## (Intercept) 1.049577e-01 3.482246e-01
## logAvgThickness_age_decay 8.432825e-26 6.343201e-09
## Age 1.418524e+00 1.123768e+00
## GenderMASC 4.355967e-01 2.018439e+00
# Predicted classes on the held-out test set
predicted.classes.multi.nova.Gender2 <- predict(m.multi.nova.Gender2, test.data, type = "class")
# Model accuracy
mean(predicted.classes.multi.nova.Gender2 == test.data$Diagnostic)
## [1] 0.6346154
# Confusion matrix and per-class statistics
confusionMatrix(predicted.classes.multi.nova.Gender2, test.data$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 29 1 8
## AD 1 2 4
## MCI 2 3 2
##
## Overall Statistics
##
## Accuracy : 0.6346
## 95% CI : (0.4896, 0.7638)
## No Information Rate : 0.6154
## P-Value [Acc > NIR] : 0.4477
##
## Kappa : 0.2671
##
## Mcnemar's Test P-Value : 0.2906
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 0.9062 0.33333 0.14286
## Specificity 0.5500 0.89130 0.86842
## Pos Pred Value 0.7632 0.28571 0.28571
## Neg Pred Value 0.7857 0.91111 0.73333
## Prevalence 0.6154 0.11538 0.26923
## Detection Rate 0.5577 0.03846 0.03846
## Detection Prevalence 0.7308 0.13462 0.13462
## Balanced Accuracy 0.7281 0.61232 0.50564
# ROC: one-vs-one multiclass AUC (pROC) for the decayed-thickness model.
multiclass.roc(
  as.numeric(test.data$Diagnostic),
  as.numeric(predicted.classes.multi.nova.Gender2),
  percent = FALSE, ci.alpha = 0.9, stratified = FALSE,
  plot = TRUE, grid = FALSE, legacy.axes = TRUE, reuse.auc = TRUE,
  print.auc = TRUE, print.thres.col = "blue", ci.type = "bars",
  print.thres.cex = 0.7, main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls > cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova.Gender2), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = FALSE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova.Gender2) with 3 levels of as.numeric(test.data$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.7593
# caret::featurePlot(x = train.data[, c("K", "logAvgThickness", "K_age_decay", "logAvgThickness_age_decay")], y = train.data$Diagnostic, plot = "box", scales = list(y = list(relation = "free"), x = list(rot = 90)), layout = c(4, 1))
multinom4 <- multinom(Diagnostic ~ K_age_decay, data = train.data)
## # weights: 9 (4 variable)
## initial value 213.130784
## iter 10 value 160.339404
## iter 20 value 157.766764
## iter 30 value 157.471142
## iter 40 value 157.426261
## iter 50 value 157.413679
## final value 157.410096
## converged
multinom5 <- multinom(Diagnostic ~ logAvgThickness_age_decay, data = train.data)
## # weights: 9 (4 variable)
## initial value 213.130784
## iter 10 value 166.470640
## iter 20 value 163.920245
## iter 30 value 163.853959
## iter 40 value 163.846199
## iter 50 value 163.844400
## final value 163.844049
## converged
multinom12 <- multinom(Diagnostic ~ I_age_decay + logAvgThickness_age_decay, data = train.data)
## # weights: 12 (6 variable)
## initial value 213.130784
## iter 10 value 167.178673
## iter 20 value 163.976034
## iter 30 value 163.556687
## iter 40 value 163.416185
## iter 50 value 163.335207
## iter 60 value 163.301769
## iter 70 value 163.252866
## iter 80 value 163.216110
## iter 90 value 163.181091
## iter 100 value 163.164625
## final value 163.164625
## stopped after 100 iterations
multinom13 <- multinom(Diagnostic ~ S_age_decay + logAvgThickness_age_decay, data = train.data)
## # weights: 12 (6 variable)
## initial value 213.130784
## iter 10 value 166.504894
## iter 20 value 164.603387
## iter 30 value 163.295075
## iter 40 value 162.945479
## iter 50 value 162.898064
## iter 60 value 162.812820
## iter 70 value 162.808396
## iter 80 value 162.799822
## iter 80 value 162.799822
## final value 162.799818
## converged
multinom0_0_0 <- multinom(Diagnostic ~ K_age_decay + I_age_decay + S_age_decay, data = train.data)
## # weights: 15 (8 variable)
## initial value 213.130784
## iter 10 value 160.519908
## iter 20 value 157.322580
## iter 30 value 154.778186
## iter 40 value 154.183429
## iter 50 value 154.049788
## iter 60 value 154.014659
## iter 70 value 154.005873
## iter 70 value 154.005872
## final value 154.005865
## converged
# anova(multinom5, multinom4, test = "Chisq")
#
# anova(multinom0_0_0, multinom4, test = "Chisq")
## da estatistica ##
# NOTE(review): this refits exactly the model already stored in multinom4
# above (same formula, same data) — the duplicate fit could simply reuse it.
m.multi.nova4 <-
multinom(Diagnostic ~ K_age_decay, data = train.data)
## # weights: 9 (4 variable)
## initial value 213.130784
## iter 10 value 160.339404
## iter 20 value 157.766764
## iter 30 value 157.471142
## iter 40 value 157.426261
## iter 50 value 157.413679
## final value 157.410096
## converged
stargazer(m.multi.nova4, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## K_age_decay -91.427*** -16.320
## (20.718) (11.747)
##
## Constant -48.893*** -9.135
## (10.786) (5.971)
##
## ----------------------------------------------
## Akaike Inf. Crit. 322.820 322.820
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics and two-sided p-values for each coefficient.
# FIX: pnorm(lower.tail = FALSE) avoids the cancellation in
# (1 - pnorm(abs(z))) * 2, which underflows to exactly 0 for large |z|.
z4 <-
  summary(m.multi.nova4)$coefficients / summary(m.multi.nova4)$standard.errors
p4 <- 2 * pnorm(abs(z4), lower.tail = FALSE)
t(p4)
## AD MCI
## (Intercept) 5.817585e-06 0.1260618
## K_age_decay 1.019664e-05 0.1647492
# Exponentiate the log-odds coefficients into odds ratios for interpretation.
coef.multi4 <- exp(coef(m.multi.nova4))
t(coef.multi4)
## AD MCI
## (Intercept) 5.837013e-22 1.078771e-04
## K_age_decay 1.965859e-40 8.168296e-08
# Predicted classes on the held-out test set
predicted.classes.multi.nova4 <- predict(m.multi.nova4, test.data, type = "class")
# Model accuracy
mean(predicted.classes.multi.nova4 == test.data$Diagnostic)
## [1] 0.6346154
# Confusion matrix and per-class statistics
confusionMatrix(predicted.classes.multi.nova4, test.data$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 32 5 14
## AD 0 1 0
## MCI 0 0 0
##
## Overall Statistics
##
## Accuracy : 0.6346
## 95% CI : (0.4896, 0.7638)
## No Information Rate : 0.6154
## P-Value [Acc > NIR] : 0.4477
##
## Kappa : 0.0732
##
## Mcnemar's Test P-Value : NA
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 1.0000 0.16667 0.0000
## Specificity 0.0500 1.00000 1.0000
## Pos Pred Value 0.6275 1.00000 NaN
## Neg Pred Value 1.0000 0.90196 0.7308
## Prevalence 0.6154 0.11538 0.2692
## Detection Rate 0.6154 0.01923 0.0000
## Detection Prevalence 0.9808 0.01923 0.0000
## Balanced Accuracy 0.5250 0.58333 0.5000
# ROC: one-vs-one multiclass AUC (pROC) for the K_age_decay-only model.
multiclass.roc(
  as.numeric(test.data$Diagnostic),
  as.numeric(predicted.classes.multi.nova4),
  percent = FALSE, ci.alpha = 0.9, stratified = FALSE,
  plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE,
  print.auc = TRUE, print.thres.col = "blue", ci.type = "bars",
  print.thres.cex = 0.7, main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova4), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova4) with 3 levels of as.numeric(test.data$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.5
# Single-predictor model: age-decay-corrected mean thickness only
# (duplicate of multinom5 above; trace below shows convergence).
m.multi.nova5 <-
multinom(Diagnostic ~ logAvgThickness_age_decay, data = train.data)
## # weights: 9 (4 variable)
## initial value 213.130784
## iter 10 value 166.470640
## iter 20 value 163.920245
## iter 30 value 163.853959
## iter 40 value 163.846199
## iter 50 value 163.844400
## final value 163.844049
## converged
stargazer(m.multi.nova5, type = "text")
##
## ======================================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ------------------------------------------------------
## logAvgThickness_age_decay -60.290*** -19.554*
## (18.616) (11.419)
##
## Constant 23.980*** 7.594
## (7.892) (4.925)
##
## ------------------------------------------------------
## Akaike Inf. Crit. 335.688 335.688
## ======================================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics and two-sided p-values for each coefficient.
# FIX: pnorm(lower.tail = FALSE) avoids the cancellation in
# (1 - pnorm(abs(z))) * 2, which underflows to exactly 0 for large |z|.
z5 <-
  summary(m.multi.nova5)$coefficients / summary(m.multi.nova5)$standard.errors
p5 <- 2 * pnorm(abs(z5), lower.tail = FALSE)
t(p5)
## AD MCI
## (Intercept) 0.002377836 0.1231297
## logAvgThickness_age_decay 0.001200961 0.0868159
# Exponentiate the log-odds coefficients into odds ratios for interpretation.
coef.multi5 <- exp(coef(m.multi.nova5))
t(coef.multi5)
## AD MCI
## (Intercept) 2.595699e+10 1.985697e+03
## logAvgThickness_age_decay 6.553701e-27 3.221050e-09
# Predicted classes on the held-out test set
predicted.classes.multi.nova5 <- predict(m.multi.nova5, test.data, type = "class")
# Model accuracy
mean(predicted.classes.multi.nova5 == test.data$Diagnostic)
## [1] 0.6153846
# Confusion matrix and per-class statistics
confusionMatrix(predicted.classes.multi.nova5, test.data$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 32 6 14
## AD 0 0 0
## MCI 0 0 0
##
## Overall Statistics
##
## Accuracy : 0.6154
## 95% CI : (0.4702, 0.747)
## No Information Rate : 0.6154
## P-Value [Acc > NIR] : 0.5608
##
## Kappa : 0
##
## Mcnemar's Test P-Value : NA
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 1.0000 0.0000 0.0000
## Specificity 0.0000 1.0000 1.0000
## Pos Pred Value 0.6154 NaN NaN
## Neg Pred Value NaN 0.8846 0.7308
## Prevalence 0.6154 0.1154 0.2692
## Detection Rate 0.6154 0.0000 0.0000
## Detection Prevalence 1.0000 0.0000 0.0000
## Balanced Accuracy 0.5000 0.5000 0.5000
# ROC: one-vs-one multiclass AUC (pROC) for the decayed-thickness-only model.
multiclass.roc(
  as.numeric(test.data$Diagnostic),
  as.numeric(predicted.classes.multi.nova5),
  percent = FALSE, ci.alpha = 0.9, stratified = FALSE,
  plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE,
  print.auc = TRUE, print.thres.col = "blue", ci.type = "bars",
  print.thres.cex = 0.7, main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova5), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova5) with 3 levels of as.numeric(test.data$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.5
# Single-predictor model: age-decay-corrected I only (NOTE: this differs from
# multinom12 above, which also includes decayed thickness).
m.multi.nova12 <-
multinom(Diagnostic ~ I_age_decay, data = train.data)
## # weights: 9 (4 variable)
## initial value 213.130784
## iter 10 value 170.306538
## iter 20 value 169.712771
## iter 30 value 169.478085
## iter 40 value 169.281733
## iter 50 value 169.258821
## final value 169.250148
## converged
stargazer(m.multi.nova12, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## I_age_decay -6.001 0.280
## (4.004) (2.638)
##
## Constant 57.282 -3.609
## (39.396) (26.003)
##
## ----------------------------------------------
## Akaike Inf. Crit. 346.500 346.500
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics and two-sided p-values for each coefficient.
# FIX: pnorm(lower.tail = FALSE) avoids the cancellation in
# (1 - pnorm(abs(z))) * 2, which underflows to exactly 0 for large |z|.
z12 <-
  summary(m.multi.nova12)$coefficients / summary(m.multi.nova12)$standard.errors
p12 <- 2 * pnorm(abs(z12), lower.tail = FALSE)
t(p12)
## AD MCI
## (Intercept) 0.1459395 0.8896081
## I_age_decay 0.1339448 0.9155752
# Exponentiate the log-odds coefficients into odds ratios for interpretation.
coef.multi12 <- exp(coef(m.multi.nova12))
t(coef.multi12)
## AD MCI
## (Intercept) 7.541402e+24 0.02707381
## I_age_decay 2.475119e-03 1.32261026
# Predicted classes on the held-out test set
predicted.classes.multi.nova12 <- predict(m.multi.nova12, test.data, type = "class")
# Model accuracy
mean(predicted.classes.multi.nova12 == test.data$Diagnostic)
## [1] 0.6153846
# Confusion matrix and per-class statistics
confusionMatrix(predicted.classes.multi.nova12, test.data$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 32 6 14
## AD 0 0 0
## MCI 0 0 0
##
## Overall Statistics
##
## Accuracy : 0.6154
## 95% CI : (0.4702, 0.747)
## No Information Rate : 0.6154
## P-Value [Acc > NIR] : 0.5608
##
## Kappa : 0
##
## Mcnemar's Test P-Value : NA
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 1.0000 0.0000 0.0000
## Specificity 0.0000 1.0000 1.0000
## Pos Pred Value 0.6154 NaN NaN
## Neg Pred Value NaN 0.8846 0.7308
## Prevalence 0.6154 0.1154 0.2692
## Detection Rate 0.6154 0.0000 0.0000
## Detection Prevalence 1.0000 0.0000 0.0000
## Balanced Accuracy 0.5000 0.5000 0.5000
# ROC: one-vs-one multiclass AUC (pROC) for the I_age_decay-only model.
multiclass.roc(
  as.numeric(test.data$Diagnostic),
  as.numeric(predicted.classes.multi.nova12),
  percent = FALSE, ci.alpha = 0.9, stratified = FALSE,
  plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE,
  print.auc = TRUE, print.thres.col = "blue", ci.type = "bars",
  print.thres.cex = 0.7, main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova12), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova12) with 3 levels of as.numeric(test.data$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.5
# Single-predictor model: age-decay-corrected S only; trace below converged.
m.multi.nova13 <-
multinom(Diagnostic ~ S_age_decay, data = train.data)
## # weights: 9 (4 variable)
## initial value 213.130784
## iter 10 value 170.414300
## iter 20 value 170.016710
## iter 30 value 169.988447
## final value 169.984663
## converged
stargazer(m.multi.nova12, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## I_age_decay -6.001 0.280
## (4.004) (2.638)
##
## Constant 57.282 -3.609
## (39.396) (26.003)
##
## ----------------------------------------------
## Akaike Inf. Crit. 346.500 346.500
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics and two-sided p-values for each coefficient.
# FIX: pnorm(lower.tail = FALSE) avoids the cancellation in
# (1 - pnorm(abs(z))) * 2, which underflows to exactly 0 for large |z|.
z13 <-
  summary(m.multi.nova13)$coefficients / summary(m.multi.nova13)$standard.errors
p13 <- 2 * pnorm(abs(z13), lower.tail = FALSE)
t(p13)
## AD MCI
## (Intercept) 0.7471431 0.2956349
## S_age_decay 0.7923474 0.3146659
# Exponentiate the log-odds coefficients into odds ratios for interpretation.
coef.multi13 <- exp(coef(m.multi.nova13))
t(coef.multi13)
## AD MCI
## (Intercept) 5.289991e-05 2.959005e-10
## S_age_decay 2.132189e+00 7.281722e+00
# Predicted classes on the held-out test set
predicted.classes.multi.nova13 <- predict(m.multi.nova13, test.data, type = "class")
# Model accuracy
mean(predicted.classes.multi.nova13 == test.data$Diagnostic)
## [1] 0.6153846
# Confusion matrix and per-class statistics
confusionMatrix(predicted.classes.multi.nova13, test.data$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 32 6 14
## AD 0 0 0
## MCI 0 0 0
##
## Overall Statistics
##
## Accuracy : 0.6154
## 95% CI : (0.4702, 0.747)
## No Information Rate : 0.6154
## P-Value [Acc > NIR] : 0.5608
##
## Kappa : 0
##
## Mcnemar's Test P-Value : NA
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 1.0000 0.0000 0.0000
## Specificity 0.0000 1.0000 1.0000
## Pos Pred Value 0.6154 NaN NaN
## Neg Pred Value NaN 0.8846 0.7308
## Prevalence 0.6154 0.1154 0.2692
## Detection Rate 0.6154 0.0000 0.0000
## Detection Prevalence 1.0000 0.0000 0.0000
## Balanced Accuracy 0.5000 0.5000 0.5000
# ROC: one-vs-one multiclass AUC (pROC) for the S_age_decay-only model.
multiclass.roc(
  as.numeric(test.data$Diagnostic),
  as.numeric(predicted.classes.multi.nova13),
  percent = FALSE, ci.alpha = 0.9, stratified = FALSE,
  plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE,
  print.auc = TRUE, print.thres.col = "blue", ci.type = "bars",
  print.thres.cex = 0.7, main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova13), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova13) with 3 levels of as.numeric(test.data$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.5
# Full decayed-curvature model (K, I, S), duplicate fit of multinom0_0_0
# above; trace below shows convergence.
m.multi.nova0_0_0 <-
multinom(Diagnostic ~ K_age_decay + I_age_decay + S_age_decay, data = train.data)
## # weights: 15 (8 variable)
## initial value 213.130784
## iter 10 value 160.519908
## iter 20 value 157.322580
## iter 30 value 154.778186
## iter 40 value 154.183429
## iter 50 value 154.049788
## iter 60 value 154.014659
## iter 70 value 154.005873
## iter 70 value 154.005872
## final value 154.005865
## converged
stargazer(m.multi.nova0_0_0, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## K_age_decay -87.824*** -11.735
## (21.825) (12.278)
##
## I_age_decay -16.776** -5.902
## (8.428) (5.082)
##
## S_age_decay 9.332 5.304
## (6.899) (3.920)
##
## Constant 19.055 -4.956
## (41.486) (26.301)
##
## ----------------------------------------------
## Akaike Inf. Crit. 324.012 324.012
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics and two-sided p-values for each coefficient.
# FIX: pnorm(lower.tail = FALSE) avoids the cancellation in
# (1 - pnorm(abs(z))) * 2, which underflows to exactly 0 for large |z|.
z0_0_0 <-
  summary(m.multi.nova0_0_0)$coefficients / summary(m.multi.nova0_0_0)$standard.errors
p0_0_0 <- 2 * pnorm(abs(z0_0_0), lower.tail = FALSE)
t(p0_0_0)
## AD MCI
## (Intercept) 6.460096e-01 0.8505317
## K_age_decay 5.723595e-05 0.3391912
## I_age_decay 4.655246e-02 0.2454558
## S_age_decay 1.761714e-01 0.1760227
# Exponentiate the log-odds coefficients into odds ratios for interpretation.
coef.multi0_0_0 <- exp(coef(m.multi.nova0_0_0))
t(coef.multi0_0_0)
## AD MCI
## (Intercept) 1.886052e+08 7.040094e-03
## K_age_decay 7.222439e-39 8.011697e-06
## I_age_decay 5.181607e-08 2.733594e-03
## S_age_decay 1.129629e+04 2.010479e+02
# Predicted classes on the held-out test set
predicted.classes.multi.nova0_0_0 <- predict(m.multi.nova0_0_0, test.data, type = "class")
# Model accuracy
mean(predicted.classes.multi.nova0_0_0 == test.data$Diagnostic)
## [1] 0.6346154
# Confusion matrix and per-class statistics
confusionMatrix(predicted.classes.multi.nova0_0_0, test.data$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 32 5 13
## AD 0 1 1
## MCI 0 0 0
##
## Overall Statistics
##
## Accuracy : 0.6346
## 95% CI : (0.4896, 0.7638)
## No Information Rate : 0.6154
## P-Value [Acc > NIR] : 0.4477163
##
## Kappa : 0.0952
##
## Mcnemar's Test P-Value : 0.0002734
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 1.0000 0.16667 0.0000
## Specificity 0.1000 0.97826 1.0000
## Pos Pred Value 0.6400 0.50000 NaN
## Neg Pred Value 1.0000 0.90000 0.7308
## Prevalence 0.6154 0.11538 0.2692
## Detection Rate 0.6154 0.01923 0.0000
## Detection Prevalence 0.9615 0.03846 0.0000
## Balanced Accuracy 0.5500 0.57246 0.5000
# ROC: multi-class AUC (pROC), averaging pairwise one-vs-one comparisons
multiclass.roc(
  as.numeric(test.data$Diagnostic),
  as.numeric(predicted.classes.multi.nova0_0_0),
  percent = FALSE,  # TRUE/FALSE, never T/F (T and F are reassignable)
  ci.alpha = 0.9,
  stratified = FALSE,
  plot = TRUE,
  grid = TRUE,
  legacy.axes = TRUE,
  reuse.auc = TRUE,
  print.auc = TRUE,
  print.thres.col = "blue",
  ci.type = "bars",
  print.thres.cex = 0.7,
  main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova0_0_0), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova0_0_0) with 3 levels of as.numeric(test.data$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.5238
# Multinomial logistic regression (nnet::multinom):
# Diagnostic ~ K_age_decay + Gender on the training split.
# NOTE(review): the reference class appears to be CTL (the coefficient table
# shows AD/MCI columns) — confirm against levels(train.data$Diagnostic).
m.multi.nova.Gender3 <-
  multinom(Diagnostic ~ K_age_decay + Gender, data = train.data)
## # weights: 12 (6 variable)
## initial value 213.130784
## iter 10 value 165.773396
## iter 20 value 154.735123
## iter 30 value 154.108950
## iter 40 value 154.095777
## final value 154.095697
## converged
stargazer(m.multi.nova.Gender3, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## K_age_decay -95.434*** -11.005
## (21.282) (12.156)
##
## GenderMASC -0.561 0.727**
## (0.579) (0.347)
##
## Constant -50.786*** -6.729
## (11.046) (6.149)
##
## ----------------------------------------------
## Akaike Inf. Crit. 320.191 320.191
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics: coefficient / standard error for each term.
z.Gender3 <-
  summary(m.multi.nova.Gender3)$coefficients / summary(m.multi.nova.Gender3)$standard.errors
# Two-sided p-values; lower.tail = FALSE avoids underflow to 0 for large |z|.
p.Gender3 <- 2 * pnorm(abs(z.Gender3), lower.tail = FALSE)
t(p.Gender3)
## AD MCI
## (Intercept) 4.273337e-06 0.27379315
## K_age_decay 7.319942e-06 0.36529343
## GenderMASC 3.321348e-01 0.03625178
# Exponentiate coefficients (relative-risk ratios) for easier interpretation.
coef.multi.Gender3 <- exp(coef(m.multi.nova.Gender3))
t(coef.multi.Gender3)
## AD MCI
## (Intercept) 8.791876e-23 1.195289e-03
## K_age_decay 3.578725e-42 1.662241e-05
## GenderMASC 5.705494e-01 2.068302e+00
# Predictions on the held-out test set
predicted.classes.multi.nova.Gender3 <-
  predict(m.multi.nova.Gender3, test.data, type = "class")
# Model accuracy: share of correctly classified test subjects
mean(predicted.classes.multi.nova.Gender3 == test.data$Diagnostic)
## [1] 0.6346154
# Per-class summary (caret)
confusionMatrix(predicted.classes.multi.nova.Gender3, test.data$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 32 5 14
## AD 0 1 0
## MCI 0 0 0
##
## Overall Statistics
##
## Accuracy : 0.6346
## 95% CI : (0.4896, 0.7638)
## No Information Rate : 0.6154
## P-Value [Acc > NIR] : 0.4477
##
## Kappa : 0.0732
##
## Mcnemar's Test P-Value : NA
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 1.0000 0.16667 0.0000
## Specificity 0.0500 1.00000 1.0000
## Pos Pred Value 0.6275 1.00000 NaN
## Neg Pred Value 1.0000 0.90196 0.7308
## Prevalence 0.6154 0.11538 0.2692
## Detection Rate 0.6154 0.01923 0.0000
## Detection Prevalence 0.9808 0.01923 0.0000
## Balanced Accuracy 0.5250 0.58333 0.5000
# ROC: multi-class AUC (pROC), averaging pairwise one-vs-one comparisons
multiclass.roc(
  as.numeric(test.data$Diagnostic),
  as.numeric(predicted.classes.multi.nova.Gender3),
  percent = FALSE,  # TRUE/FALSE, never T/F (T and F are reassignable)
  ci.alpha = 0.9,
  stratified = FALSE,
  plot = TRUE,
  grid = FALSE,
  legacy.axes = TRUE,
  reuse.auc = TRUE,
  print.auc = TRUE,
  print.thres.col = "blue",
  ci.type = "bars",
  print.thres.cex = 0.7,
  main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova.Gender3), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = FALSE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova.Gender3) with 3 levels of as.numeric(test.data$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.5
# Multinomial model with log cortical thickness, Age and Gender as predictors.
m.multi.nova.Gender4 <-
  multinom(Diagnostic ~ logAvgThickness + Age + Gender, data = train.data)
## # weights: 15 (8 variable)
## initial value 213.130784
## iter 10 value 137.019686
## iter 20 value 135.406470
## iter 30 value 134.647746
## iter 40 value 133.996241
## iter 50 value 133.939802
## iter 60 value 133.891576
## iter 70 value 133.853402
## iter 70 value 133.853402
## iter 70 value 133.853400
## final value 133.853400
## converged
stargazer(m.multi.nova.Gender4, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## logAvgThickness -56.659*** -18.619
## (3.556) (11.620)
##
## Age 0.299*** 0.101***
## (0.067) (0.032)
##
## GenderMASC -0.809 0.690*
## (0.691) (0.364)
##
## Constant -1.401 -0.813
## (4.506) (5.673)
##
## ----------------------------------------------
## Akaike Inf. Crit. 283.707 283.707
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics: coefficient / standard error for each term.
z.Gender4 <-
  summary(m.multi.nova.Gender4)$coefficients / summary(m.multi.nova.Gender4)$standard.errors
# Two-sided p-values; lower.tail = FALSE avoids underflow to 0 for large |z|
# (the logAvgThickness p-value printed as exactly 0 under the old formula).
p.Gender4 <- 2 * pnorm(abs(z.Gender4), lower.tail = FALSE)
t(p.Gender4)
## AD MCI
## (Intercept) 7.557704e-01 0.886047910
## logAvgThickness 0.000000e+00 0.109082476
## Age 7.282412e-06 0.001659953
## GenderMASC 2.421014e-01 0.058045876
# Exponentiate coefficients (relative-risk ratios) for easier interpretation.
coef.multi.Gender4 <- exp(coef(m.multi.nova.Gender4))
t(coef.multi.Gender4)
## AD MCI
## (Intercept) 2.462509e-01 4.435506e-01
## logAvgThickness 2.473268e-25 8.202890e-09
## Age 1.348289e+00 1.106460e+00
## GenderMASC 4.454340e-01 1.994658e+00
# Predictions on the held-out test set
predicted.classes.multi.nova.Gender4 <-
  predict(m.multi.nova.Gender4, test.data, type = "class")
# Model accuracy: share of correctly classified test subjects
mean(predicted.classes.multi.nova.Gender4 == test.data$Diagnostic)
## [1] 0.6346154
# Per-class summary (caret)
confusionMatrix(predicted.classes.multi.nova.Gender4, test.data$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 29 1 8
## AD 1 2 4
## MCI 2 3 2
##
## Overall Statistics
##
## Accuracy : 0.6346
## 95% CI : (0.4896, 0.7638)
## No Information Rate : 0.6154
## P-Value [Acc > NIR] : 0.4477
##
## Kappa : 0.2671
##
## Mcnemar's Test P-Value : 0.2906
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 0.9062 0.33333 0.14286
## Specificity 0.5500 0.89130 0.86842
## Pos Pred Value 0.7632 0.28571 0.28571
## Neg Pred Value 0.7857 0.91111 0.73333
## Prevalence 0.6154 0.11538 0.26923
## Detection Rate 0.5577 0.03846 0.03846
## Detection Prevalence 0.7308 0.13462 0.13462
## Balanced Accuracy 0.7281 0.61232 0.50564
# ROC: multi-class AUC (pROC), averaging pairwise one-vs-one comparisons
multiclass.roc(
  as.numeric(test.data$Diagnostic),
  as.numeric(predicted.classes.multi.nova.Gender4),
  percent = FALSE,  # TRUE/FALSE, never T/F (T and F are reassignable)
  ci.alpha = 0.9,
  stratified = FALSE,
  plot = TRUE,
  grid = FALSE,
  legacy.axes = TRUE,
  reuse.auc = TRUE,
  print.auc = TRUE,
  print.thres.col = "blue",
  ci.type = "bars",
  print.thres.cex = 0.7,
  main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls > cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova.Gender4), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = FALSE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova.Gender4) with 3 levels of as.numeric(test.data$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.7593
#dados_lobos_v1_T$Diagnostic <- factor(dados_lobos_v1_T$Diagnostic, levels = c("AD", "MCI","CTL"))
# CTL as reference level: model coefficients contrast AD and MCI against CTL.
dados_lobos_v1_T$Diagnostic <- relevel(dados_lobos_v1_T$Diagnostic, "CTL")
# test.samples <- c(sample(which(dados_hemi_v1_filter$Diagnostic == "AD"), N_ALZ), sample(which(dados_hemi_v1_filter$Diagnostic == "CTL"), N_CTL), sample(which(dados_hemi_v1_filter$Diagnostic == "MCI"), N_CCL))
# subj.training <- as_tibble(dados_hemi_v1_filter[-test.samples, $SUBJ)
# colnames(subj.training) <- c("SUBJ")
# filter(dados_lobos_v1_T, SUBJ == subj.training)
# Split the lobe data by subject, matching the hemisphere train/test split.
# An explicit `by = "SUBJ"` replaces dplyr's implicit key detection (the
# 'Joining, by = "SUBJ"' message): implicit joins can silently pick up other
# shared columns if the data frames change.
train.data_lobes <- anti_join(dados_lobos_v1_T, subj.training, by = "SUBJ")
test.data_lobes <- semi_join(dados_lobos_v1_T, subj.training, by = "SUBJ")
#train.data_lobes <- dados_lobos_v1_T[-test.samples, ]
#test.data_lobes <- dados_lobos_v1_T[test.samples, ]
# Box plots of each candidate predictor by diagnostic group (all subjects)
caret::featurePlot(x = dados_lobos_v1_T[, c("K", "logAvgThickness", "K_age_decay", "logAvgThickness_age_decay")], y = dados_lobos_v1_T$Diagnostic, plot = "box", scales = list(y = list(relation = "free"), x = list(rot = 90)), layout = c(4, 1))
# Sanity check: train + test subject counts should sum to the total.
print(n_distinct(dados_lobos_v1_T$SUBJ))
## [1] 123
print(n_distinct(train.data_lobes$SUBJ))
## [1] 97
print(n_distinct(test.data_lobes$SUBJ))
## [1] 26
# ggplot(dados_lobos_v1_T, aes(x = Diagnostic, y = K_corrected, color = Diagnostic, fill = Diagnostic)) +
# geom_violin(trim = FALSE, alpha = 0.4) + geom_jitter() +
# theme_pubr() + stat_compare_means(method = "anova") + labs(caption = paste("N = ", n_distinct(filter(dados_lobos_v1_T, !is.na(K))$SUBJ)))
#aov1.l <- aov(K ~ Diagnostic, dados_lobos_v1_T)
#aov1.l_TK <-TukeyHSD(aov1.l)
#aov1.l_TK
#plot(aov1.l_TK , las=1 , col="brown")
# ggplot(dados_lobos_v1_T, aes(x = Diagnostic, y = K_age_decay, color = Diagnostic, fill = Diagnostic)) +
# geom_violin(trim = FALSE, alpha = 0.4) + geom_jitter() +
# theme_pubr() + stat_compare_means(method = "anova") + labs(caption = paste("N = ", n_distinct(filter(dados_lobos_v1_T, !is.na(K_age_decay))$SUBJ)))
#aov2.l <- aov(K_age_decay ~ Diagnostic, dados_lobos_v1_T)
#aov2.l_TK <-TukeyHSD(aov2.l)
#aov2.l_TK
#plot(aov2.l_TK , las=1 , col="brown")
# ggplot(dados_lobos_v1_T, aes(x = Diagnostic, y = logAvgThickness, color = Diagnostic, fill = Diagnostic)) +
# geom_violin(trim = FALSE, alpha = 0.4) + geom_jitter() +
# theme_pubr() + stat_compare_means(method = "anova") + labs(caption = paste("N = ", n_distinct(filter(dados_lobos_v1_T, !is.na(logAvgThickness))$SUBJ)))
#aov3.l <- aov(logAvgThickness ~ Diagnostic, dados_lobos_v1_T)
#TukeyHSD(aov3.l)
#aov3.l_TK <-TukeyHSD(aov3.l)
#aov3.l_TK
#plot(aov3.l_TK , las=1 , col="brown")
# ggplot(dados_lobos_v1_T, aes(x = Diagnostic, y = logAvgThickness_age_decay, color = Diagnostic, fill = Diagnostic)) +
# geom_violin(trim = FALSE, alpha = 0.4) + geom_jitter() +
# theme_pubr() + stat_compare_means(method = "anova") + labs(caption = paste("N = ", n_distinct(filter(dados_lobos_v1_T, !is.na(logAvgThickness_age_decay))$SUBJ)))
#aov4.l <- aov(logAvgThickness_age_decay ~ Diagnostic, dados_lobos_v1_T)
#aov4.l_TK <-TukeyHSD(aov4.l)
#aov4.l_TK
#plot(aov4.l_TK , las=1 , col="brown")
caret::featurePlot(x = train.data_lobes[, c("K", "logAvgThickness", "K_age_decay", "logAvgThickness_age_decay")], y = train.data_lobes$Diagnostic, plot = "box", scales = list(y = list(relation = "free"), x = list(rot = 90)), layout = c(4, 1))
multinom1.l <- multinom(Diagnostic ~ K_corrected + Age, data = train.data_lobes)
## # weights: 12 (6 variable)
## initial value 209.834947
## iter 10 value 141.575398
## iter 20 value 140.157003
## iter 30 value 136.694806
## iter 40 value 136.396273
## iter 50 value 136.336741
## iter 60 value 136.305878
## iter 70 value 136.298883
## iter 80 value 136.289140
## iter 80 value 136.289140
## final value 136.289134
## converged
multinom2.l <- multinom(Diagnostic ~ logAvgThickness + Age, data = train.data_lobes)
## # weights: 12 (6 variable)
## initial value 209.834947
## iter 10 value 136.015187
## iter 20 value 134.489485
## iter 30 value 129.671782
## iter 40 value 128.743859
## iter 50 value 128.122835
## iter 60 value 127.916315
## iter 70 value 127.888697
## iter 80 value 127.873220
## iter 90 value 127.866424
## final value 127.865462
## converged
multinom4.l <- multinom(Diagnostic ~ K_age_decay, data = train.data_lobes)
## # weights: 9 (4 variable)
## initial value 209.834947
## iter 10 value 159.450633
## iter 20 value 157.883619
## iter 30 value 157.818715
## final value 157.812182
## converged
multinom5.l <- multinom(Diagnostic ~ logAvgThickness_age_decay, data = train.data_lobes)
## # weights: 9 (4 variable)
## initial value 209.834947
## iter 10 value 153.840025
## iter 20 value 151.476849
## iter 30 value 151.287969
## iter 40 value 151.246333
## iter 50 value 151.236198
## iter 60 value 151.235080
## iter 70 value 151.234570
## final value 151.234496
## converged
multinom0.l <- multinom(Diagnostic ~ K_corrected + Age + ESC, data = train.data_lobes)
## # weights: 15 (8 variable)
## initial value 209.834947
## iter 10 value 133.159250
## iter 20 value 129.925225
## iter 30 value 127.989850
## iter 40 value 126.403215
## iter 50 value 125.931577
## iter 60 value 125.672273
## iter 70 value 125.531204
## iter 80 value 125.472788
## iter 90 value 125.397238
## iter 100 value 125.382025
## final value 125.382025
## stopped after 100 iterations
multinom0_2.l <- multinom(Diagnostic ~ logAvgThickness + Age + ESC, data = train.data_lobes)
## # weights: 15 (8 variable)
## initial value 209.834947
## iter 10 value 130.024743
## iter 20 value 126.633979
## iter 30 value 123.999814
## iter 40 value 120.794615
## iter 50 value 120.084969
## iter 60 value 119.740361
## iter 70 value 119.393359
## iter 80 value 119.273031
## iter 90 value 119.045426
## iter 100 value 118.992056
## final value 118.992056
## stopped after 100 iterations
## model statistics ##
# Coefficients, standard errors, residual deviance and AIC for each fit.
summary(multinom1.l)
## Call:
## multinom(formula = Diagnostic ~ K_corrected + Age, data = train.data_lobes)
##
## Coefficients:
## (Intercept) K_corrected Age
## AD -43.42394 -40.17592 0.2878773
## MCI -16.12119 -15.71296 0.1050940
##
## Std. Errors:
## (Intercept) K_corrected Age
## AD 8.917722 16.223212 0.06919745
## MCI 4.875215 9.573517 0.03211611
##
## Residual Deviance: 272.5783
## AIC: 284.5783
summary(multinom2.l)
## Call:
## multinom(formula = Diagnostic ~ logAvgThickness + Age, data = train.data_lobes)
##
## Coefficients:
## (Intercept) logAvgThickness Age
## AD 14.236634 -82.89300 0.27513007
## MCI 6.076846 -29.83423 0.09138058
##
## Std. Errors:
## (Intercept) logAvgThickness Age
## AD 4.474889 3.108388 0.07002867
## MCI 5.260717 9.641075 0.03258314
##
## Residual Deviance: 255.7309
## AIC: 267.7309
summary(multinom4.l)
## Call:
## multinom(formula = Diagnostic ~ K_age_decay, data = train.data_lobes)
##
## Coefficients:
## (Intercept) K_age_decay
## AD -29.281719 -56.63954
## MCI -9.986388 -19.07838
##
## Std. Errors:
## (Intercept) K_age_decay
## AD 6.942107 14.130529
## MCI 4.416435 9.197366
##
## Residual Deviance: 315.6244
## AIC: 323.6244
summary(multinom5.l)
## Call:
## multinom(formula = Diagnostic ~ logAvgThickness_age_decay, data = train.data_lobes)
##
## Coefficients:
## (Intercept) logAvgThickness_age_decay
## AD 33.99878 -74.85389
## MCI 13.87979 -30.34649
##
## Std. Errors:
## (Intercept) logAvgThickness_age_decay
## AD 7.248193 15.33938
## MCI 4.873226 10.06217
##
## Residual Deviance: 302.469
## AIC: 310.469
summary(multinom0.l)
## Call:
## multinom(formula = Diagnostic ~ K_corrected + Age + ESC, data = train.data_lobes)
##
## Coefficients:
## (Intercept) K_corrected Age ESC
## AD -36.39525 -52.84894 0.20199625 -0.5360482
## MCI -11.59963 -19.55334 0.07129166 -0.2803732
##
## Std. Errors:
## (Intercept) K_corrected Age ESC
## AD 4.885115 3.260441 0.07226061 0.13001756
## MCI 4.386738 8.728306 0.03255077 0.08451055
##
## Residual Deviance: 250.7641
## AIC: 266.7641
summary(multinom0_2.l)
## Call:
## multinom(formula = Diagnostic ~ logAvgThickness + Age + ESC,
## data = train.data_lobes)
##
## Coefficients:
## (Intercept) logAvgThickness Age ESC
## AD 28.60755 -88.59466 0.20757392 -0.5185264
## MCI 11.18113 -28.95464 0.06542701 -0.2510507
##
## Std. Errors:
## (Intercept) logAvgThickness Age ESC
## AD 5.007179 3.313323 0.07327224 0.13795842
## MCI 5.623141 9.778950 0.03278615 0.08433249
##
## Residual Deviance: 237.9841
## AIC: 253.9841
# anova(multinom5, multinom4, test = "Chisq")
## model statistics ##
# NOTE(review): this refits the same formula as multinom1.l above under the
# name used by the prediction/ROC code that follows.
m.multi.nova1.l <-
  multinom(Diagnostic ~ K_corrected + Age, data = train.data_lobes)
## # weights: 12 (6 variable)
## initial value 209.834947
## iter 10 value 141.575398
## iter 20 value 140.157003
## iter 30 value 136.694806
## iter 40 value 136.396273
## iter 50 value 136.336741
## iter 60 value 136.305878
## iter 70 value 136.298883
## iter 80 value 136.289140
## iter 80 value 136.289140
## final value 136.289134
## converged
stargazer(m.multi.nova1.l, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## K_corrected -40.176** -15.713
## (16.223) (9.574)
##
## Age 0.288*** 0.105***
## (0.069) (0.032)
##
## Constant -43.424*** -16.121***
## (8.918) (4.875)
##
## ----------------------------------------------
## Akaike Inf. Crit. 284.578 284.578
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics: coefficient / standard error for each term.
z1.l <-
  summary(m.multi.nova1.l)$coefficients / summary(m.multi.nova1.l)$standard.errors
# Two-sided p-values; lower.tail = FALSE avoids underflow to 0 for large |z|.
p1.l <- 2 * pnorm(abs(z1.l), lower.tail = FALSE)
t(p1.l)
## AD MCI
## (Intercept) 1.119388e-06 0.0009438013
## K_corrected 1.326975e-02 0.1007363552
## Age 3.179273e-05 0.0010667055
# Exponentiate coefficients (relative-risk ratios) for easier interpretation.
coef.multi1.l <- exp(coef(m.multi.nova1.l))
t(coef.multi1.l)
## AD MCI
## (Intercept) 1.384278e-19 9.969114e-08
## K_corrected 3.563037e-18 1.499509e-07
## Age 1.333594e+00 1.110815e+00
# Predictions on the held-out lobe test set
predicted.classes.multi.nova1.l <-
  predict(m.multi.nova1.l, test.data_lobes, type = "class")
# Model accuracy: share of correctly classified test subjects
mean(predicted.classes.multi.nova1.l == test.data_lobes$Diagnostic)
## [1] 0.627451
# Per-class summary (caret), kept for later inspection
cM1.l <- confusionMatrix(predicted.classes.multi.nova1.l, test.data_lobes$Diagnostic)
cM1.l
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 29 2 9
## AD 0 2 4
## MCI 2 2 1
##
## Overall Statistics
##
## Accuracy : 0.6275
## 95% CI : (0.4808, 0.7587)
## No Information Rate : 0.6078
## P-Value [Acc > NIR] : 0.44711
##
## Kappa : 0.2279
##
## Mcnemar's Test P-Value : 0.06813
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 0.9355 0.33333 0.07143
## Specificity 0.4500 0.91111 0.89189
## Pos Pred Value 0.7250 0.33333 0.20000
## Neg Pred Value 0.8182 0.91111 0.71739
## Prevalence 0.6078 0.11765 0.27451
## Detection Rate 0.5686 0.03922 0.01961
## Detection Prevalence 0.7843 0.11765 0.09804
## Balanced Accuracy 0.6927 0.62222 0.48166
#cM1.l.t.score <- mutate(cM1.l, Diagnostic = c("AD", "MCI", "CTL"),sensitivity = as.data.frame(cM1.l$byClass[,1]), specificity = as.data.frame(cM1.l$byClass[,2]))
# ROC: multi-class AUC (pROC), averaging pairwise one-vs-one comparisons
multiclass.roc(
  as.numeric(test.data_lobes$Diagnostic),
  as.numeric(predicted.classes.multi.nova1.l),
  percent = FALSE,  # TRUE/FALSE, never T/F (T and F are reassignable)
  ci.alpha = 0.9,
  stratified = FALSE,
  plot = TRUE,
  grid = TRUE,
  legacy.axes = TRUE,
  reuse.auc = TRUE,
  print.auc = TRUE,
  print.thres.col = "blue",
  ci.type = "bars",
  print.thres.cex = 0.7,
  main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls > cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data_lobes$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova1.l), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova1.l) with 3 levels of as.numeric(test.data_lobes$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.706
# Refit of multinom2.l (logAvgThickness + Age) under the reporting name.
m.multi.nova2.l <-
  multinom(Diagnostic ~ logAvgThickness + Age, data = train.data_lobes)
## # weights: 12 (6 variable)
## initial value 209.834947
## iter 10 value 136.015187
## iter 20 value 134.489485
## iter 30 value 129.671782
## iter 40 value 128.743859
## iter 50 value 128.122835
## iter 60 value 127.916315
## iter 70 value 127.888697
## iter 80 value 127.873220
## iter 90 value 127.866424
## final value 127.865462
## converged
stargazer(m.multi.nova2.l, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## logAvgThickness -82.893*** -29.834***
## (3.108) (9.641)
##
## Age 0.275*** 0.091***
## (0.070) (0.033)
##
## Constant 14.237*** 6.077
## (4.475) (5.261)
##
## ----------------------------------------------
## Akaike Inf. Crit. 267.731 267.731
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics: coefficient / standard error for each term.
z2.l <-
  summary(m.multi.nova2.l)$coefficients / summary(m.multi.nova2.l)$standard.errors
# Two-sided p-values; lower.tail = FALSE avoids underflow to 0 for large |z|
# (the logAvgThickness p-value printed as exactly 0 under the old formula).
p2.l <- 2 * pnorm(abs(z2.l), lower.tail = FALSE)
t(p2.l)
## AD MCI
## (Intercept) 1.465400e-03 0.248034627
## logAvgThickness 0.000000e+00 0.001971505
## Age 8.536362e-05 0.005038911
# Exponentiate coefficients (relative-risk ratios) for easier interpretation.
coef.multi2.l <- exp(coef(m.multi.nova2.l))
t(coef.multi2.l)
## AD MCI
## (Intercept) 1.523672e+06 4.356528e+02
## logAvgThickness 1.000066e-36 1.104487e-13
## Age 1.316702e+00 1.095686e+00
# Predictions on the held-out lobe test set
predicted.classes.multi.nova2.l <-
  predict(m.multi.nova2.l, test.data_lobes, type = "class")
# Model accuracy: share of correctly classified test subjects
mean(predicted.classes.multi.nova2.l == test.data_lobes$Diagnostic)
## [1] 0.6862745
# Per-class summary (caret)
confusionMatrix(predicted.classes.multi.nova2.l, test.data_lobes$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 31 3 9
## AD 0 3 4
## MCI 0 0 1
##
## Overall Statistics
##
## Accuracy : 0.6863
## 95% CI : (0.5411, 0.8089)
## No Information Rate : 0.6078
## P-Value [Acc > NIR] : 0.157693
##
## Kappa : 0.3267
##
## Mcnemar's Test P-Value : 0.001134
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 1.0000 0.50000 0.07143
## Specificity 0.4000 0.91111 1.00000
## Pos Pred Value 0.7209 0.42857 1.00000
## Neg Pred Value 1.0000 0.93182 0.74000
## Prevalence 0.6078 0.11765 0.27451
## Detection Rate 0.6078 0.05882 0.01961
## Detection Prevalence 0.8431 0.13725 0.01961
## Balanced Accuracy 0.7000 0.70556 0.53571
# ROC: multi-class AUC (pROC), averaging pairwise one-vs-one comparisons
multiclass.roc(
  as.numeric(test.data_lobes$Diagnostic),
  as.numeric(predicted.classes.multi.nova2.l),
  percent = FALSE,  # TRUE/FALSE, never T/F (T and F are reassignable)
  ci.alpha = 0.9,
  stratified = FALSE,
  plot = TRUE,
  grid = TRUE,
  legacy.axes = TRUE,
  reuse.auc = TRUE,
  print.auc = TRUE,
  print.thres.col = "blue",
  ci.type = "bars",
  print.thres.cex = 0.7,
  main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls > cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data_lobes$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova2.l), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova2.l) with 3 levels of as.numeric(test.data_lobes$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.6607
# Refit of multinom4.l (K_age_decay only) under the reporting name.
m.multi.nova4.l <-
  multinom(Diagnostic ~ K_age_decay, data = train.data_lobes)
## # weights: 9 (4 variable)
## initial value 209.834947
## iter 10 value 159.450633
## iter 20 value 157.883619
## iter 30 value 157.818715
## final value 157.812182
## converged
stargazer(m.multi.nova4.l, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## K_age_decay -56.640*** -19.078**
## (14.131) (9.197)
##
## Constant -29.282*** -9.986**
## (6.942) (4.416)
##
## ----------------------------------------------
## Akaike Inf. Crit. 323.624 323.624
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics: coefficient / standard error for each term.
z4.l <-
  summary(m.multi.nova4.l)$coefficients / summary(m.multi.nova4.l)$standard.errors
# Two-sided p-values; lower.tail = FALSE avoids underflow to 0 for large |z|.
p4.l <- 2 * pnorm(abs(z4.l), lower.tail = FALSE)
t(p4.l)
## AD MCI
## (Intercept) 2.464930e-05 0.02374763
## K_age_decay 6.115477e-05 0.03804859
# Exponentiate coefficients (relative-risk ratios) for easier interpretation.
coef.multi4.l <- exp(coef(m.multi.nova4.l))
t(coef.multi4.l)
## AD MCI
## (Intercept) 1.919158e-13 4.602213e-05
## K_age_decay 2.522086e-25 5.180415e-09
# Predictions on the held-out lobe test set
predicted.classes.multi.nova4.l <-
  predict(m.multi.nova4.l, test.data_lobes, type = "class")
# Model accuracy: share of correctly classified test subjects
mean(predicted.classes.multi.nova4.l == test.data_lobes$Diagnostic)
## [1] 0.6078431
# Per-class summary (caret)
confusionMatrix(predicted.classes.multi.nova4.l, test.data_lobes$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 31 6 14
## AD 0 0 0
## MCI 0 0 0
##
## Overall Statistics
##
## Accuracy : 0.6078
## 95% CI : (0.4611, 0.7416)
## No Information Rate : 0.6078
## P-Value [Acc > NIR] : 0.5609
##
## Kappa : 0
##
## Mcnemar's Test P-Value : NA
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 1.0000 0.0000 0.0000
## Specificity 0.0000 1.0000 1.0000
## Pos Pred Value 0.6078 NaN NaN
## Neg Pred Value NaN 0.8824 0.7255
## Prevalence 0.6078 0.1176 0.2745
## Detection Rate 0.6078 0.0000 0.0000
## Detection Prevalence 1.0000 0.0000 0.0000
## Balanced Accuracy 0.5000 0.5000 0.5000
# ROC: multi-class AUC (pROC), averaging pairwise one-vs-one comparisons
multiclass.roc(
  as.numeric(test.data_lobes$Diagnostic),
  as.numeric(predicted.classes.multi.nova4.l),
  percent = FALSE,  # TRUE/FALSE, never T/F (T and F are reassignable)
  ci.alpha = 0.9,
  stratified = FALSE,
  plot = TRUE,
  grid = TRUE,
  legacy.axes = TRUE,
  reuse.auc = TRUE,
  print.auc = TRUE,
  print.thres.col = "blue",
  ci.type = "bars",
  print.thres.cex = 0.7,
  main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data_lobes$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova4.l), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova4.l) with 3 levels of as.numeric(test.data_lobes$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.5
# Refit of multinom5.l (logAvgThickness_age_decay only) under the reporting name.
m.multi.nova5.l <-
  multinom(Diagnostic ~ logAvgThickness_age_decay, data = train.data_lobes)
## # weights: 9 (4 variable)
## initial value 209.834947
## iter 10 value 153.840025
## iter 20 value 151.476849
## iter 30 value 151.287969
## iter 40 value 151.246333
## iter 50 value 151.236198
## iter 60 value 151.235080
## iter 70 value 151.234570
## final value 151.234496
## converged
stargazer(m.multi.nova5.l, type = "text")
##
## ======================================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ------------------------------------------------------
## logAvgThickness_age_decay -74.854*** -30.346***
## (15.339) (10.062)
##
## Constant 33.999*** 13.880***
## (7.248) (4.873)
##
## ------------------------------------------------------
## Akaike Inf. Crit. 310.469 310.469
## ======================================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics: coefficient / standard error for each term.
z5.l <-
  summary(m.multi.nova5.l)$coefficients / summary(m.multi.nova5.l)$standard.errors
# Two-sided p-values; lower.tail = FALSE avoids underflow to 0 for large |z|.
p5.l <- 2 * pnorm(abs(z5.l), lower.tail = FALSE)
t(p5.l)
## AD MCI
## (Intercept) 2.723315e-06 0.004397112
## logAvgThickness_age_decay 1.061666e-06 0.002562187
# Exponentiate coefficients (relative-risk ratios) for easier interpretation.
coef.multi5.l <- exp(coef(m.multi.nova5.l))
t(coef.multi5.l)
## AD MCI
## (Intercept) 5.827479e+14 1.066388e+06
## logAvgThickness_age_decay 3.100059e-33 6.617367e-14
# Predictions on the held-out lobe test set
predicted.classes.multi.nova5.l <-
  predict(m.multi.nova5.l, test.data_lobes, type = "class")
# Model accuracy: share of correctly classified test subjects
mean(predicted.classes.multi.nova5.l == test.data_lobes$Diagnostic)
## [1] 0.627451
# Per-class summary (caret)
confusionMatrix(predicted.classes.multi.nova5.l, test.data_lobes$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 31 5 12
## AD 0 1 2
## MCI 0 0 0
##
## Overall Statistics
##
## Accuracy : 0.6275
## 95% CI : (0.4808, 0.7587)
## No Information Rate : 0.6078
## P-Value [Acc > NIR] : 0.4471134
##
## Kappa : 0.1151
##
## Mcnemar's Test P-Value : 0.0002734
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 1.0000 0.16667 0.0000
## Specificity 0.1500 0.95556 1.0000
## Pos Pred Value 0.6458 0.33333 NaN
## Neg Pred Value 1.0000 0.89583 0.7255
## Prevalence 0.6078 0.11765 0.2745
## Detection Rate 0.6078 0.01961 0.0000
## Detection Prevalence 0.9412 0.05882 0.0000
## Balanced Accuracy 0.5750 0.56111 0.5000
# ROC: multi-class AUC (pROC), averaging pairwise one-vs-one comparisons
multiclass.roc(
  as.numeric(test.data_lobes$Diagnostic),
  as.numeric(predicted.classes.multi.nova5.l),
  percent = FALSE,  # TRUE/FALSE, never T/F (T and F are reassignable)
  ci.alpha = 0.9,
  stratified = FALSE,
  plot = TRUE,
  grid = TRUE,
  legacy.axes = TRUE,
  reuse.auc = TRUE,
  print.auc = TRUE,
  print.thres.col = "blue",
  ci.type = "bars",
  print.thres.cex = 0.7,
  main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data_lobes$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova5.l), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova5.l) with 3 levels of as.numeric(test.data_lobes$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.5476
# Refit of multinom0.l (K_corrected + Age + ESC) under the reporting name.
# The original run stopped at nnet's default 100 iterations WITHOUT
# converging ("stopped after 100 iterations"); raise maxit so it can finish.
m.multi.nova0.l <-
  multinom(Diagnostic ~ K_corrected + Age + ESC, data = train.data_lobes,
           maxit = 1000)
## # weights: 15 (8 variable)
## initial value 209.834947
## iter 10 value 133.159250
## iter 20 value 129.925225
## iter 30 value 127.989850
## iter 40 value 126.403215
## iter 50 value 125.931577
## iter 60 value 125.672273
## iter 70 value 125.531204
## iter 80 value 125.472788
## iter 90 value 125.397238
## iter 100 value 125.382025
## final value 125.382025
## stopped after 100 iterations
stargazer(m.multi.nova0.l, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## K_corrected -52.849*** -19.553**
## (3.260) (8.728)
##
## Age 0.202*** 0.071**
## (0.072) (0.033)
##
## ESC -0.536*** -0.280***
## (0.130) (0.085)
##
## Constant -36.395*** -11.600***
## (4.885) (4.387)
##
## ----------------------------------------------
## Akaike Inf. Crit. 266.764 266.764
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics: coefficient / standard error for each term.
z0.l <-
  summary(m.multi.nova0.l)$coefficients / summary(m.multi.nova0.l)$standard.errors
# Two-sided p-values; lower.tail = FALSE avoids underflow to 0 for large |z|
# (the K_corrected p-value printed as exactly 0 under the old formula).
p0.l <- 2 * pnorm(abs(z0.l), lower.tail = FALSE)
t(p0.l)
## AD MCI
## (Intercept) 9.325873e-14 0.0081872102
## K_corrected 0.000000e+00 0.0250765178
## Age 5.183787e-03 0.0285120426
## ESC 3.741465e-05 0.0009079076
# Exponentiate coefficients (relative-risk ratios) for easier interpretation.
coef.multi0.l <- exp(coef(m.multi.nova0.l))
t(coef.multi0.l)
## AD MCI
## (Intercept) 1.562223e-16 9.169462e-06
## K_corrected 1.116851e-23 3.221745e-09
## Age 1.223843e+00 1.073894e+00
## ESC 5.850557e-01 7.555018e-01
# Predictions on the held-out test set
predicted.classes.multi.nova0.l <- m.multi.nova0.l %>% predict(test.data_lobes, type = "class")
# Model accuracy (proportion of correctly classified test rows)
mean(predicted.classes.multi.nova0.l == test.data_lobes$Diagnostic)
## [1] 0.6666667
# Per-class summary (sensitivity/specificity etc.)
confusionMatrix(predicted.classes.multi.nova0.l, test.data_lobes$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 27 3 2
## AD 0 0 5
## MCI 4 3 7
##
## Overall Statistics
##
## Accuracy : 0.6667
## 95% CI : (0.5208, 0.7924)
## No Information Rate : 0.6078
## P-Value [Acc > NIR] : 0.2384
##
## Kappa : 0.3731
##
## Mcnemar's Test P-Value : 0.2440
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 0.8710 0.00000 0.5000
## Specificity 0.7500 0.88889 0.8108
## Pos Pred Value 0.8438 0.00000 0.5000
## Neg Pred Value 0.7895 0.86957 0.8108
## Prevalence 0.6078 0.11765 0.2745
## Detection Rate 0.5294 0.00000 0.1373
## Detection Prevalence 0.6275 0.09804 0.2745
## Balanced Accuracy 0.8105 0.44444 0.6554
# ROC: multi-class AUC of predicted vs. true classes.
# NOTE(review): computed on hard class labels, not predicted probabilities;
# predict(..., type = "probs") would give a more informative AUC — confirm intent.
multiclass.roc(
  as.numeric(test.data_lobes$Diagnostic),
  as.numeric(predicted.classes.multi.nova0.l),
  percent = FALSE, # spelled out: F is a reassignable binding
  ci.alpha = 0.9,
  stratified = FALSE,
  plot = TRUE,
  grid = TRUE,
  legacy.axes = TRUE,
  reuse.auc = TRUE,
  print.auc = TRUE,
  print.thres.col = "blue",
  ci.type = "bars",
  print.thres.cex = 0.7,
  main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data_lobes$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova0.l), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova0.l) with 3 levels of as.numeric(test.data_lobes$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.7053
# Companion model: cortical thickness (logAvgThickness) + Age + ESC,
# for comparison against the curvature-based model above.
m.multi.nova0_2.l <-
multinom(Diagnostic ~ logAvgThickness + Age + ESC, data = train.data_lobes)
## # weights: 15 (8 variable)
## initial value 209.834947
## iter 10 value 130.024743
## iter 20 value 126.633979
## iter 30 value 123.999814
## iter 40 value 120.794615
## iter 50 value 120.084969
## iter 60 value 119.740361
## iter 70 value 119.393359
## iter 80 value 119.273031
## iter 90 value 119.045426
## iter 100 value 118.992056
## final value 118.992056
## stopped after 100 iterations
stargazer(m.multi.nova0_2.l, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## AD MCI
## (1) (2)
## ----------------------------------------------
## logAvgThickness -88.595*** -28.955***
## (3.313) (9.779)
##
## Age 0.208*** 0.065**
## (0.073) (0.033)
##
## ESC -0.519*** -0.251***
## (0.138) (0.084)
##
## Constant 28.608*** 11.181**
## (5.007) (5.623)
##
## ----------------------------------------------
## Akaike Inf. Crit. 253.984 253.984
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics and two-sided p-values (multinom() reports no p-values).
z0_2.l <-
summary(m.multi.nova0_2.l)$coefficients / summary(m.multi.nova0_2.l)$standard.errors
# NOTE(review): (1 - pnorm(|z|)) underflows to 0 for large |z|; kept as-is to
# match the recorded output below.
p0_2.l <- (1 - pnorm(abs(z0_2.l), 0, 1)) * 2
t(p0_2.l)
## AD MCI
## (Intercept) 1.108013e-08 0.046765884
## logAvgThickness 0.000000e+00 0.003067268
## Age 4.612588e-03 0.045980896
## ESC 1.708870e-04 0.002911646
# Exponentiate to the odds-ratio scale for easier interpretation.
coef.multi0_2.l <- exp(coef(m.multi.nova0_2.l))
t(coef.multi0_2.l)
## AD MCI
## (Intercept) 2.655232e+12 7.176367e+04
## logAvgThickness 3.340629e-39 2.661711e-13
## Age 1.230689e+00 1.067615e+00
## ESC 5.953973e-01 7.779830e-01
# Predictions on the held-out test set
predicted.classes.multi.nova0_2.l <- m.multi.nova0_2.l %>% predict(test.data_lobes, type = "class")
# Model accuracy
mean(predicted.classes.multi.nova0_2.l == test.data_lobes$Diagnostic)
## [1] 0.5686275
# Per-class summary
confusionMatrix(predicted.classes.multi.nova0_2.l, test.data_lobes$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction CTL AD MCI
## CTL 25 1 8
## AD 0 1 3
## MCI 6 4 3
##
## Overall Statistics
##
## Accuracy : 0.5686
## 95% CI : (0.4225, 0.7065)
## No Information Rate : 0.6078
## P-Value [Acc > NIR] : 0.7647
##
## Kappa : 0.1633
##
## Mcnemar's Test P-Value : 0.6989
##
## Statistics by Class:
##
## Class: CTL Class: AD Class: MCI
## Sensitivity 0.8065 0.16667 0.21429
## Specificity 0.5500 0.93333 0.72973
## Pos Pred Value 0.7353 0.25000 0.23077
## Neg Pred Value 0.6471 0.89362 0.71053
## Prevalence 0.6078 0.11765 0.27451
## Detection Rate 0.4902 0.01961 0.05882
## Detection Prevalence 0.6667 0.07843 0.25490
## Balanced Accuracy 0.6782 0.55000 0.47201
# ROC: multi-class AUC of predicted vs. true classes.
# NOTE(review): uses hard class labels rather than probabilities — confirm intent.
multiclass.roc(
  as.numeric(test.data_lobes$Diagnostic),
  as.numeric(predicted.classes.multi.nova0_2.l),
  percent = FALSE, # spelled out: F is a reassignable binding
  ci.alpha = 0.9,
  stratified = FALSE,
  plot = TRUE,
  grid = TRUE,
  legacy.axes = TRUE,
  reuse.auc = TRUE,
  print.auc = TRUE,
  print.thres.col = "blue",
  ci.type = "bars",
  print.thres.cex = 0.7,
  main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls > cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data_lobes$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova0_2.l), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova0_2.l) with 3 levels of as.numeric(test.data_lobes$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.7188
# Reorder factor levels so "AD" becomes the reference class for multinom()
# (the fits above used CTL as reference; those below report MCI / CTL columns).
dados_lobos_v1_P$Diagnostic <- factor(dados_lobos_v1_P$Diagnostic, levels = c("AD", "MCI","CTL"))
#dados_lobos_v1_P$Diagnostic <- relevel(dados_lobos_v1_P$Diagnostic, "CTL")
# test.samples <- c(sample(which(dados_hemi_v1_filter$Diagnostic == "AD"), N_ALZ), sample(which(dados_hemi_v1_filter$Diagnostic == "CTL"), N_CTL), sample(which(dados_hemi_v1_filter$Diagnostic == "MCI"), N_CCL))
# subj.training <- as_tibble(dados_hemi_v1_filter[-test.samples, ]$SUBJ)
# colnames(subj.training) <- c("SUBJ")
# filter(dados_lobos_v1_P, SUBJ == subj.training)
# Subject-level split (join key "SUBJ" inferred by dplyr).
# NOTE(review): anti_join makes train = subjects ABSENT from subj.training
# (97 train / 26 test below) — verify subj.training actually lists the
# held-out subjects, since its name suggests the opposite.
train.data_lobes <- anti_join(dados_lobos_v1_P, subj.training)
## Joining, by = "SUBJ"
test.data_lobes <- semi_join(dados_lobos_v1_P, subj.training)
## Joining, by = "SUBJ"
#train.data_lobes <- dados_lobos_v1_P[-test.samples, ]
#test.data_lobes <- dados_lobos_v1_P[test.samples, ]
# Box plots of candidate predictors by Diagnostic, full data set.
caret::featurePlot(x = dados_lobos_v1_P[, c("K", "logAvgThickness", "K_age_decay", "logAvgThickness_age_decay")], y = dados_lobos_v1_P$Diagnostic, plot = "box", scales = list(y = list(relation = "free"), x = list(rot = 90)), layout = c(4, 1))
# Sanity check: distinct subject counts in full / train / test sets.
print(n_distinct(dados_lobos_v1_P$SUBJ))
## [1] 123
print(n_distinct(train.data_lobes$SUBJ))
## [1] 97
print(n_distinct(test.data_lobes$SUBJ))
## [1] 26
# ggplot(dados_lobos_v1_P, aes(x = Diagnostic, y = K_corrected, color = Diagnostic, fill = Diagnostic)) +
# geom_violin(trim = FALSE, alpha = 0.4) + geom_jitter() +
# theme_pubr() + stat_compare_means(method = "anova") + labs(caption = paste("N = ", n_distinct(filter(dados_lobos_v1_P, !is.na(K))$SUBJ)))
#
# ggplot(dados_lobos_v1_P, aes(x = Diagnostic, y = K_age_decay, color = Diagnostic, fill = Diagnostic)) +
# geom_violin(trim = FALSE, alpha = 0.4) + geom_jitter() +
# theme_pubr() + stat_compare_means(method = "anova") + labs(caption = paste("N = ", n_distinct(filter(dados_lobos_v1_P, !is.na(K_age_decay))$SUBJ)))
#
# ggplot(dados_lobos_v1_P, aes(x = Diagnostic, y = logAvgThickness, color = Diagnostic, fill = Diagnostic)) +
# geom_violin(trim = FALSE, alpha = 0.4) + geom_jitter() +
# theme_pubr() +stat_compare_means(method = "anova") + labs(caption = paste("N = ", n_distinct(filter(dados_lobos_v1_P, !is.na(logAvgThickness))$SUBJ)))
#
# ggplot(dados_lobos_v1_P, aes(x = Diagnostic, y = logAvgThickness_age_decay, color = Diagnostic, fill = Diagnostic)) +
# geom_violin(trim = FALSE, alpha = 0.4) + geom_jitter() +
# theme_pubr() + stat_compare_means(method = "anova") + labs(caption = paste("N = ", n_distinct(filter(dados_lobos_v1_P, !is.na(logAvgThickness_age_decay))$SUBJ)))
caret::featurePlot(x = train.data_lobes[, c("K", "logAvgThickness", "K_age_decay", "logAvgThickness_age_decay")], y = train.data_lobes$Diagnostic, plot = "box", scales = list(y = list(relation = "free"), x = list(rot = 90)), layout = c(4, 1))
multinom1.l <- multinom(Diagnostic ~ K_corrected + Age, data = train.data_lobes)
## # weights: 12 (6 variable)
## initial value 209.834947
## iter 10 value 141.267129
## iter 20 value 138.460587
## iter 30 value 136.975934
## iter 40 value 136.208586
## iter 50 value 135.793166
## iter 60 value 135.565539
## iter 70 value 135.425613
## iter 80 value 135.319929
## iter 90 value 135.270924
## iter 100 value 135.234107
## final value 135.234107
## stopped after 100 iterations
multinom2.l <- multinom(Diagnostic ~ logAvgThickness + Age, data = train.data_lobes)
## # weights: 12 (6 variable)
## initial value 209.834947
## iter 10 value 140.112316
## iter 20 value 140.109087
## iter 30 value 140.107806
## iter 40 value 140.107086
## iter 40 value 140.107086
## final value 140.107086
## converged
multinom4.l <- multinom(Diagnostic ~ K_age_decay, data = train.data_lobes)
## # weights: 9 (4 variable)
## initial value 209.834947
## iter 10 value 164.092859
## iter 20 value 160.298396
## iter 30 value 158.442357
## iter 40 value 157.908980
## iter 50 value 157.618570
## iter 60 value 157.391961
## iter 70 value 157.271370
## final value 157.266006
## converged
multinom5.l <- multinom(Diagnostic ~ logAvgThickness_age_decay, data = train.data_lobes)
## # weights: 9 (4 variable)
## initial value 209.834947
## iter 10 value 167.914186
## iter 20 value 167.707482
## iter 30 value 167.628680
## iter 40 value 167.591576
## iter 50 value 167.576582
## iter 60 value 167.568447
## iter 70 value 167.567494
## final value 167.567358
## converged
multinom0.l <- multinom(Diagnostic ~ K_corrected + Age + ESC, data = train.data_lobes)
## # weights: 15 (8 variable)
## initial value 209.834947
## iter 10 value 134.356267
## iter 20 value 130.835410
## iter 30 value 126.999669
## iter 40 value 126.229308
## iter 50 value 126.048718
## iter 60 value 126.033128
## iter 70 value 125.993333
## iter 80 value 125.981032
## iter 90 value 125.973457
## iter 100 value 125.966099
## final value 125.966099
## stopped after 100 iterations
multinom0_2.l <- multinom(Diagnostic ~ logAvgThickness + Age + ESC, data = train.data_lobes)
## # weights: 15 (8 variable)
## initial value 209.834947
## iter 10 value 133.888425
## iter 20 value 131.025158
## iter 20 value 131.025157
## iter 30 value 130.994199
## iter 40 value 130.944127
## iter 50 value 130.936848
## final value 130.936690
## converged
## statistics section ##
summary(multinom1.l)
## Call:
## multinom(formula = Diagnostic ~ K_corrected + Age, data = train.data_lobes)
##
## Coefficients:
## (Intercept) K_corrected Age
## MCI 32.62419 41.41588 -0.1353049
## CTL 40.78570 38.92756 -0.2590214
##
## Std. Errors:
## (Intercept) K_corrected Age
## MCI 4.215444 4.897229 0.06421664
## CTL 4.549258 4.656325 0.06446859
##
## Residual Deviance: 270.4682
## AIC: 282.4682
summary(multinom2.l)
## Call:
## multinom(formula = Diagnostic ~ logAvgThickness + Age, data = train.data_lobes)
##
## Coefficients:
## (Intercept) logAvgThickness Age
## MCI 14.16578 6.695054 -0.2093745
## CTL 21.08276 11.418877 -0.3216516
##
## Std. Errors:
## (Intercept) logAvgThickness Age
## MCI 4.697883 5.942391 0.06054105
## CTL 4.728078 6.004256 0.06203983
##
## Residual Deviance: 280.2142
## AIC: 292.2142
summary(multinom4.l)
## Call:
## multinom(formula = Diagnostic ~ K_age_decay, data = train.data_lobes)
##
## Coefficients:
## (Intercept) K_age_decay
## MCI 27.61423 53.97525
## CTL 27.82294 52.63510
##
## Std. Errors:
## (Intercept) K_age_decay
## MCI 7.069096 14.16927
## CTL 6.554405 13.06617
##
## Residual Deviance: 314.532
## AIC: 322.532
summary(multinom5.l)
## Call:
## multinom(formula = Diagnostic ~ logAvgThickness_age_decay, data = train.data_lobes)
##
## Coefficients:
## (Intercept) logAvgThickness_age_decay
## MCI -2.888453 9.414015
## CTL -4.802201 16.196927
##
## Std. Errors:
## (Intercept) logAvgThickness_age_decay
## MCI 6.738466 16.59502
## CTL 6.194693 15.25847
##
## Residual Deviance: 335.1347
## AIC: 343.1347
summary(multinom0.l)
## Call:
## multinom(formula = Diagnostic ~ K_corrected + Age + ESC, data = train.data_lobes)
##
## Coefficients:
## (Intercept) K_corrected Age ESC
## MCI 33.36283 50.94232 -0.1145390 0.2157760
## CTL 35.28085 47.20934 -0.2117836 0.4720196
##
## Std. Errors:
## (Intercept) K_corrected Age ESC
## MCI 4.821311 4.997858 0.06726226 0.1206859
## CTL 4.941775 4.895750 0.06763127 0.1280171
##
## Residual Deviance: 251.9322
## AIC: 267.9322
summary(multinom0_2.l)
## Call:
## multinom(formula = Diagnostic ~ logAvgThickness + Age + ESC,
## data = train.data_lobes)
##
## Coefficients:
## (Intercept) logAvgThickness Age ESC
## MCI 8.335468 11.85025 -0.1896330 0.1987492
## CTL 10.425206 15.45735 -0.2797791 0.4501467
##
## Std. Errors:
## (Intercept) logAvgThickness Age ESC
## MCI 5.000568 6.140268 0.06270940 0.1114571
## CTL 5.016519 6.093216 0.06429726 0.1204567
##
## Residual Deviance: 261.8734
## AIC: 277.8734
# anova(multinom5, multinom4, test = "Chisq")
## statistics section ##
# Refit of candidate model 1 (K_corrected + Age) with AD as reference class,
# for the inference/prediction pipeline below.
m.multi.nova1.l <-
multinom(Diagnostic ~ K_corrected + Age, data = train.data_lobes)
## # weights: 12 (6 variable)
## initial value 209.834947
## iter 10 value 141.267129
## iter 20 value 138.460587
## iter 30 value 136.975934
## iter 40 value 136.208586
## iter 50 value 135.793166
## iter 60 value 135.565539
## iter 70 value 135.425613
## iter 80 value 135.319929
## iter 90 value 135.270924
## iter 100 value 135.234107
## final value 135.234107
## stopped after 100 iterations
stargazer(m.multi.nova1.l, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## MCI CTL
## (1) (2)
## ----------------------------------------------
## K_corrected 41.416*** 38.928***
## (4.897) (4.656)
##
## Age -0.135** -0.259***
## (0.064) (0.064)
##
## Constant 32.624*** 40.786***
## (4.215) (4.549)
##
## ----------------------------------------------
## Akaike Inf. Crit. 282.468 282.468
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics and two-sided p-values (multinom() reports no p-values).
z1.l <-
summary(m.multi.nova1.l)$coefficients / summary(m.multi.nova1.l)$standard.errors
# NOTE(review): (1 - pnorm(|z|)) underflows to 0 for large |z|; kept as-is to
# match the recorded output below.
p1.l <- (1 - pnorm(abs(z1.l), 0, 1)) * 2
t(p1.l)
## MCI CTL
## (Intercept) 9.992007e-15 0.000000e+00
## K_corrected 0.000000e+00 0.000000e+00
## Age 3.511694e-02 5.874584e-05
# Exponentiate to the odds-ratio scale for easier interpretation.
coef.multi1.l <- exp(coef(m.multi.nova1.l))
t(coef.multi1.l)
## MCI CTL
## (Intercept) 1.474021e+14 5.164209e+17
## K_corrected 9.698138e+17 8.054249e+16
## Age 8.734495e-01 7.718065e-01
# Predictions on the held-out test set
predicted.classes.multi.nova1.l <- m.multi.nova1.l %>% predict(test.data_lobes, type = "class")
# Model accuracy
mean(predicted.classes.multi.nova1.l == test.data_lobes$Diagnostic)
## [1] 0.7254902
# Per-class summary
confusionMatrix(predicted.classes.multi.nova1.l, test.data_lobes$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction AD MCI CTL
## AD 3 1 1
## MCI 1 5 1
## CTL 2 8 29
##
## Overall Statistics
##
## Accuracy : 0.7255
## 95% CI : (0.5826, 0.8411)
## No Information Rate : 0.6078
## P-Value [Acc > NIR] : 0.05502
##
## Kappa : 0.4351
##
## Mcnemar's Test P-Value : 0.12294
##
## Statistics by Class:
##
## Class: AD Class: MCI Class: CTL
## Sensitivity 0.50000 0.35714 0.9355
## Specificity 0.95556 0.94595 0.5000
## Pos Pred Value 0.60000 0.71429 0.7436
## Neg Pred Value 0.93478 0.79545 0.8333
## Prevalence 0.11765 0.27451 0.6078
## Detection Rate 0.05882 0.09804 0.5686
## Detection Prevalence 0.09804 0.13725 0.7647
## Balanced Accuracy 0.72778 0.65154 0.7177
# ROC: multi-class AUC of predicted vs. true classes.
# NOTE(review): uses hard class labels rather than probabilities — confirm intent.
multiclass.roc(
  as.numeric(test.data_lobes$Diagnostic),
  as.numeric(predicted.classes.multi.nova1.l),
  percent = FALSE, # spelled out: F is a reassignable binding
  ci.alpha = 0.9,
  stratified = FALSE,
  plot = TRUE,
  grid = TRUE,
  legacy.axes = TRUE,
  reuse.auc = TRUE,
  print.auc = TRUE,
  print.thres.col = "blue",
  ci.type = "bars",
  print.thres.cex = 0.7,
  main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data_lobes$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova1.l), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova1.l) with 3 levels of as.numeric(test.data_lobes$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.7288
# Refit of candidate model 2 (logAvgThickness + Age) with AD as reference class.
m.multi.nova2.l <-
multinom(Diagnostic ~ logAvgThickness + Age, data = train.data_lobes)
## # weights: 12 (6 variable)
## initial value 209.834947
## iter 10 value 140.112316
## iter 20 value 140.109087
## iter 30 value 140.107806
## iter 40 value 140.107086
## iter 40 value 140.107086
## final value 140.107086
## converged
stargazer(m.multi.nova2.l, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## MCI CTL
## (1) (2)
## ----------------------------------------------
## logAvgThickness 6.695 11.419*
## (5.942) (6.004)
##
## Age -0.209*** -0.322***
## (0.061) (0.062)
##
## Constant 14.166*** 21.083***
## (4.698) (4.728)
##
## ----------------------------------------------
## Akaike Inf. Crit. 292.214 292.214
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics and two-sided p-values (multinom() reports no p-values).
z2.l <-
summary(m.multi.nova2.l)$coefficients / summary(m.multi.nova2.l)$standard.errors
p2.l <- (1 - pnorm(abs(z2.l), 0, 1)) * 2
t(p2.l)
## MCI CTL
## (Intercept) 0.0025667972 8.232206e-06
## logAvgThickness 0.2598862842 5.719766e-02
## Age 0.0005434175 2.164803e-07
# Exponentiate to the odds-ratio scale for easier interpretation.
coef.multi2.l <- exp(coef(m.multi.nova2.l))
t(coef.multi2.l)
## MCI CTL
## (Intercept) 1.419450e+06 1.432598e+09
## logAvgThickness 8.083972e+02 9.102387e+04
## Age 8.110915e-01 7.249507e-01
# Predictions on the held-out test set
predicted.classes.multi.nova2.l <- m.multi.nova2.l %>% predict(test.data_lobes, type = "class")
# Model accuracy
mean(predicted.classes.multi.nova2.l == test.data_lobes$Diagnostic)
## [1] 0.6470588
# Per-class summary (note: this model never predicts MCI on the test set)
confusionMatrix(predicted.classes.multi.nova2.l, test.data_lobes$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction AD MCI CTL
## AD 4 4 2
## MCI 0 0 0
## CTL 2 10 29
##
## Overall Statistics
##
## Accuracy : 0.6471
## 95% CI : (0.5007, 0.7757)
## No Information Rate : 0.6078
## P-Value [Acc > NIR] : 0.336847
##
## Kappa : 0.2772
##
## Mcnemar's Test P-Value : 0.002905
##
## Statistics by Class:
##
## Class: AD Class: MCI Class: CTL
## Sensitivity 0.66667 0.0000 0.9355
## Specificity 0.86667 1.0000 0.4000
## Pos Pred Value 0.40000 NaN 0.7073
## Neg Pred Value 0.95122 0.7255 0.8000
## Prevalence 0.11765 0.2745 0.6078
## Detection Rate 0.07843 0.0000 0.5686
## Detection Prevalence 0.19608 0.0000 0.8039
## Balanced Accuracy 0.76667 0.5000 0.6677
# ROC: multi-class AUC of predicted vs. true classes.
# NOTE(review): uses hard class labels rather than probabilities — confirm intent.
multiclass.roc(
  as.numeric(test.data_lobes$Diagnostic),
  as.numeric(predicted.classes.multi.nova2.l),
  percent = FALSE, # spelled out: F is a reassignable binding
  ci.alpha = 0.9,
  stratified = FALSE,
  plot = TRUE,
  grid = TRUE,
  legacy.axes = TRUE,
  reuse.auc = TRUE,
  print.auc = TRUE,
  print.thres.col = "blue",
  ci.type = "bars",
  print.thres.cex = 0.7,
  main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data_lobes$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova2.l), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova2.l) with 3 levels of as.numeric(test.data_lobes$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.7007
# Refit of candidate model 4 (K_age_decay only) with AD as reference class.
m.multi.nova4.l <-
multinom(Diagnostic ~ K_age_decay, data = train.data_lobes)
## # weights: 9 (4 variable)
## initial value 209.834947
## iter 10 value 164.092859
## iter 20 value 160.298396
## iter 30 value 158.442357
## iter 40 value 157.908980
## iter 50 value 157.618570
## iter 60 value 157.391961
## iter 70 value 157.271370
## final value 157.266006
## converged
stargazer(m.multi.nova4.l, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## MCI CTL
## (1) (2)
## ----------------------------------------------
## K_age_decay 53.975*** 52.635***
## (14.169) (13.066)
##
## Constant 27.614*** 27.823***
## (7.069) (6.554)
##
## ----------------------------------------------
## Akaike Inf. Crit. 322.532 322.532
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics and two-sided p-values (multinom() reports no p-values).
z4.l <-
summary(m.multi.nova4.l)$coefficients / summary(m.multi.nova4.l)$standard.errors
p4.l <- (1 - pnorm(abs(z4.l), 0, 1)) * 2
t(p4.l)
## MCI CTL
## (Intercept) 9.370838e-05 2.186696e-05
## K_age_decay 1.393510e-04 5.617009e-05
# Exponentiate to the odds-ratio scale for easier interpretation.
coef.multi4.l <- exp(coef(m.multi.nova4.l))
t(coef.multi4.l)
## MCI CTL
## (Intercept) 9.833449e+11 1.211568e+12
## K_age_decay 2.761548e+23 7.229893e+22
# Predictions on the held-out test set
predicted.classes.multi.nova4.l <- m.multi.nova4.l %>% predict(test.data_lobes, type = "class")
# Model accuracy
mean(predicted.classes.multi.nova4.l == test.data_lobes$Diagnostic)
## [1] 0.6078431
# Per-class summary (predicts CTL almost everywhere — see matrix below)
confusionMatrix(predicted.classes.multi.nova4.l, test.data_lobes$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction AD MCI CTL
## AD 0 1 0
## MCI 0 0 0
## CTL 6 13 31
##
## Overall Statistics
##
## Accuracy : 0.6078
## 95% CI : (0.4611, 0.7416)
## No Information Rate : 0.6078
## P-Value [Acc > NIR] : 0.5609369
##
## Kappa : 0.0239
##
## Mcnemar's Test P-Value : 0.0001697
##
## Statistics by Class:
##
## Class: AD Class: MCI Class: CTL
## Sensitivity 0.00000 0.0000 1.0000
## Specificity 0.97778 1.0000 0.0500
## Pos Pred Value 0.00000 NaN 0.6200
## Neg Pred Value 0.88000 0.7255 1.0000
## Prevalence 0.11765 0.2745 0.6078
## Detection Rate 0.00000 0.0000 0.6078
## Detection Prevalence 0.01961 0.0000 0.9804
## Balanced Accuracy 0.48889 0.5000 0.5250
# ROC: multi-class AUC of predicted vs. true classes.
# NOTE(review): uses hard class labels rather than probabilities — confirm intent.
multiclass.roc(
  as.numeric(test.data_lobes$Diagnostic),
  as.numeric(predicted.classes.multi.nova4.l),
  percent = FALSE, # spelled out: F is a reassignable binding
  ci.alpha = 0.9,
  stratified = FALSE,
  plot = TRUE,
  grid = TRUE,
  legacy.axes = TRUE,
  reuse.auc = TRUE,
  print.auc = TRUE,
  print.thres.col = "blue",
  ci.type = "bars",
  print.thres.cex = 0.7,
  main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data_lobes$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova4.l), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova4.l) with 3 levels of as.numeric(test.data_lobes$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.5
# Refit of candidate model 5 (logAvgThickness_age_decay only), AD as reference.
m.multi.nova5.l <-
multinom(Diagnostic ~ logAvgThickness_age_decay, data = train.data_lobes)
## # weights: 9 (4 variable)
## initial value 209.834947
## iter 10 value 167.914186
## iter 20 value 167.707482
## iter 30 value 167.628680
## iter 40 value 167.591576
## iter 50 value 167.576582
## iter 60 value 167.568447
## iter 70 value 167.567494
## final value 167.567358
## converged
stargazer(m.multi.nova5.l, type = "text")
##
## ======================================================
## Dependent variable:
## ----------------------------
## MCI CTL
## (1) (2)
## ------------------------------------------------------
## logAvgThickness_age_decay 9.414 16.197
## (16.595) (15.258)
##
## Constant -2.888 -4.802
## (6.738) (6.195)
##
## ------------------------------------------------------
## Akaike Inf. Crit. 343.135 343.135
## ======================================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics and two-sided p-values (multinom() reports no p-values).
z5.l <-
summary(m.multi.nova5.l)$coefficients / summary(m.multi.nova5.l)$standard.errors
p5.l <- (1 - pnorm(abs(z5.l), 0, 1)) * 2
t(p5.l)
## MCI CTL
## (Intercept) 0.6681770 0.4382144
## logAvgThickness_age_decay 0.5705244 0.2884610
# Exponentiate to the odds-ratio scale for easier interpretation.
coef.multi5.l <- exp(coef(m.multi.nova5.l))
t(coef.multi5.l)
## MCI CTL
## (Intercept) 5.566228e-02 8.211656e-03
## logAvgThickness_age_decay 1.225899e+04 1.082021e+07
# Predictions on the held-out test set
predicted.classes.multi.nova5.l <- m.multi.nova5.l %>% predict(test.data_lobes, type = "class")
# Model accuracy
mean(predicted.classes.multi.nova5.l == test.data_lobes$Diagnostic)
## [1] 0.6078431
# Per-class summary (degenerate: predicts CTL for every test row)
confusionMatrix(predicted.classes.multi.nova5.l, test.data_lobes$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction AD MCI CTL
## AD 0 0 0
## MCI 0 0 0
## CTL 6 14 31
##
## Overall Statistics
##
## Accuracy : 0.6078
## 95% CI : (0.4611, 0.7416)
## No Information Rate : 0.6078
## P-Value [Acc > NIR] : 0.5609
##
## Kappa : 0
##
## Mcnemar's Test P-Value : NA
##
## Statistics by Class:
##
## Class: AD Class: MCI Class: CTL
## Sensitivity 0.0000 0.0000 1.0000
## Specificity 1.0000 1.0000 0.0000
## Pos Pred Value NaN NaN 0.6078
## Neg Pred Value 0.8824 0.7255 NaN
## Prevalence 0.1176 0.2745 0.6078
## Detection Rate 0.0000 0.0000 0.6078
## Detection Prevalence 0.0000 0.0000 1.0000
## Balanced Accuracy 0.5000 0.5000 0.5000
# ROC: multi-class AUC of predicted vs. true classes.
# NOTE(review): uses hard class labels rather than probabilities — confirm intent.
multiclass.roc(
  as.numeric(test.data_lobes$Diagnostic),
  as.numeric(predicted.classes.multi.nova5.l),
  percent = FALSE, # spelled out: F is a reassignable binding
  ci.alpha = 0.9,
  stratified = FALSE,
  plot = TRUE,
  grid = TRUE,
  legacy.axes = TRUE,
  reuse.auc = TRUE,
  print.auc = TRUE,
  print.thres.col = "blue",
  ci.type = "bars",
  print.thres.cex = 0.7,
  main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls < cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data_lobes$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova5.l), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova5.l) with 3 levels of as.numeric(test.data_lobes$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.5
# Refit of model 0 (K_corrected + Age + ESC) with AD as reference class.
# NOTE(review): reuses the name m.multi.nova0.l, overwriting the earlier
# CTL-reference fit.
m.multi.nova0.l <-
multinom(Diagnostic ~ K_corrected + Age + ESC, data = train.data_lobes)
## # weights: 15 (8 variable)
## initial value 209.834947
## iter 10 value 134.356267
## iter 20 value 130.835410
## iter 30 value 126.999669
## iter 40 value 126.229308
## iter 50 value 126.048718
## iter 60 value 126.033128
## iter 70 value 125.993333
## iter 80 value 125.981032
## iter 90 value 125.973457
## iter 100 value 125.966099
## final value 125.966099
## stopped after 100 iterations
stargazer(m.multi.nova0.l, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## MCI CTL
## (1) (2)
## ----------------------------------------------
## K_corrected 50.942*** 47.209***
## (4.998) (4.896)
##
## Age -0.115* -0.212***
## (0.067) (0.068)
##
## ESC 0.216* 0.472***
## (0.121) (0.128)
##
## Constant 33.363*** 35.281***
## (4.821) (4.942)
##
## ----------------------------------------------
## Akaike Inf. Crit. 267.932 267.932
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics and two-sided p-values for the refit model
# (overwrites z0.l / p0.l / coef.multi0.l from the CTL-reference fit).
z0.l <-
summary(m.multi.nova0.l)$coefficients / summary(m.multi.nova0.l)$standard.errors
p0.l <- (1 - pnorm(abs(z0.l), 0, 1)) * 2
t(p0.l)
## MCI CTL
## (Intercept) 4.520606e-12 9.379164e-13
## K_corrected 0.000000e+00 0.000000e+00
## Age 8.859216e-02 1.739485e-03
## ESC 7.378989e-02 2.267707e-04
# Exponentiate to the odds-ratio scale for easier interpretation.
coef.multi0.l <- exp(coef(m.multi.nova0.l))
t(coef.multi0.l)
## MCI CTL
## (Intercept) 3.085268e+14 2.100290e+15
## K_corrected 1.330357e+22 3.182415e+20
## Age 8.917772e-01 8.091398e-01
## ESC 1.240824e+00 1.603229e+00
# Predictions on the held-out test set
predicted.classes.multi.nova0.l <- m.multi.nova0.l %>% predict(test.data_lobes, type = "class")
# Model accuracy
mean(predicted.classes.multi.nova0.l == test.data_lobes$Diagnostic)
## [1] 0.7058824
# Per-class summary
confusionMatrix(predicted.classes.multi.nova0.l, test.data_lobes$Diagnostic)
## Confusion Matrix and Statistics
##
## Reference
## Prediction AD MCI CTL
## AD 0 1 1
## MCI 1 11 5
## CTL 5 2 25
##
## Overall Statistics
##
## Accuracy : 0.7059
## 95% CI : (0.5617, 0.8251)
## No Information Rate : 0.6078
## P-Value [Acc > NIR] : 0.0969
##
## Kappa : 0.4371
##
## Mcnemar's Test P-Value : 0.2667
##
## Statistics by Class:
##
## Class: AD Class: MCI Class: CTL
## Sensitivity 0.00000 0.7857 0.8065
## Specificity 0.95556 0.8378 0.6500
## Pos Pred Value 0.00000 0.6471 0.7812
## Neg Pred Value 0.87755 0.9118 0.6842
## Prevalence 0.11765 0.2745 0.6078
## Detection Rate 0.00000 0.2157 0.4902
## Detection Prevalence 0.03922 0.3333 0.6275
## Balanced Accuracy 0.47778 0.8118 0.7282
# ROC analysis: multi-class AUC (averaged over all pairwise class
# comparisons) for the K_corrected model's test-set predictions.
multiclass.roc(
  as.numeric(test.data_lobes$Diagnostic),
  as.numeric(predicted.classes.multi.nova0.l),
  percent = FALSE,  # FALSE, not F: F is a reassignable binding, not a keyword
  ci.alpha = 0.9,
  stratified = FALSE,
  plot = TRUE,
  grid = TRUE,
  legacy.axes = TRUE,
  reuse.auc = TRUE,
  print.auc = TRUE,
  print.thres.col = "blue",
  ci.type = "bars",
  print.thres.cex = 0.7,
  main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls > cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data_lobes$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova0.l), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova0.l) with 3 levels of as.numeric(test.data_lobes$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.72
# Alternative multinomial model: log average cortical thickness instead of
# K_corrected, with the same Age and ESC covariates. Coefficients are
# reported for MCI and CTL relative to the first Diagnostic level (AD),
# matching the two-column stargazer tables above and below.
m.multi.nova0_2.l <-
  multinom(Diagnostic ~ logAvgThickness + Age + ESC, data = train.data_lobes)
## # weights: 15 (8 variable)
## initial value 209.834947
## iter 10 value 133.888425
## iter 20 value 131.025158
## iter 20 value 131.025157
## iter 30 value 130.994199
## iter 40 value 130.944127
## iter 50 value 130.936848
## final value 130.936690
## converged
# Text regression table for the thickness-based model; stargazer's stars
# use the thresholds noted at the bottom (*p<0.1; **p<0.05; ***p<0.01).
stargazer(m.multi.nova0_2.l, type = "text")
##
## ==============================================
## Dependent variable:
## ----------------------------
## MCI CTL
## (1) (2)
## ----------------------------------------------
## logAvgThickness 11.850* 15.457**
## (6.140) (6.093)
##
## Age -0.190*** -0.280***
## (0.063) (0.064)
##
## ESC 0.199* 0.450***
## (0.111) (0.120)
##
## Constant 8.335* 10.425**
## (5.001) (5.017)
##
## ----------------------------------------------
## Akaike Inf. Crit. 277.873 277.873
## ==============================================
## Note: *p<0.1; **p<0.05; ***p<0.01
# Wald z statistics for the thickness-based multinomial model.
z0_2.l <-
  summary(m.multi.nova0_2.l)$coefficients / summary(m.multi.nova0_2.l)$standard.errors
# Two-sided p-values via lower.tail = FALSE: numerically safer than
# (1 - pnorm(...)) * 2, which underflows to 0 for large |z|.
p0_2.l <- 2 * pnorm(abs(z0_2.l), lower.tail = FALSE)
t(p0_2.l)
## MCI CTL
## (Intercept) 0.095533435 0.0376932195
## logAvgThickness 0.053616242 0.0111866675
## Age 0.002494593 0.0000135309
## ESC 0.074555340 0.0001862297
# To ease interpretation: exponentiate the log-odds coefficients into
# relative risk ratios for the thickness-based model.
coef.multi0_2.l <- exp(coef(m.multi.nova0_2.l))
t(coef.multi0_2.l)
## MCI CTL
## (Intercept) 4.169154e+03 3.369840e+04
## logAvgThickness 1.401197e+05 5.164665e+06
## Age 8.272627e-01 7.559507e-01
## ESC 1.219876e+00 1.568542e+00
# Predicted diagnostic class on the test set, thickness-based model.
predicted.classes.multi.nova0_2.l <-
  predict(m.multi.nova0_2.l, test.data_lobes, type = "class")
# Overall accuracy on the held-out data.
mean(predicted.classes.multi.nova0_2.l == test.data_lobes$Diagnostic)
## [1] 0.6078431
# Detailed classification summary for the thickness-based model:
# confusion matrix plus per-class statistics.
confusionMatrix(
  data = predicted.classes.multi.nova0_2.l,
  reference = test.data_lobes$Diagnostic
)
## Confusion Matrix and Statistics
##
## Reference
## Prediction AD MCI CTL
## AD 0 6 2
## MCI 2 6 4
## CTL 4 2 25
##
## Overall Statistics
##
## Accuracy : 0.6078
## 95% CI : (0.4611, 0.7416)
## No Information Rate : 0.6078
## P-Value [Acc > NIR] : 0.5609
##
## Kappa : 0.2837
##
## Mcnemar's Test P-Value : 0.3430
##
## Statistics by Class:
##
## Class: AD Class: MCI Class: CTL
## Sensitivity 0.0000 0.4286 0.8065
## Specificity 0.8222 0.8378 0.7000
## Pos Pred Value 0.0000 0.5000 0.8065
## Neg Pred Value 0.8605 0.7949 0.7000
## Prevalence 0.1176 0.2745 0.6078
## Detection Rate 0.0000 0.1176 0.4902
## Detection Prevalence 0.1569 0.2353 0.6078
## Balanced Accuracy 0.4111 0.6332 0.7532
# ROC analysis: multi-class AUC for the thickness-based model's
# test-set predictions.
multiclass.roc(
  as.numeric(test.data_lobes$Diagnostic),
  as.numeric(predicted.classes.multi.nova0_2.l),
  percent = FALSE,  # FALSE, not F: F is a reassignable binding, not a keyword
  ci.alpha = 0.9,
  stratified = FALSE,
  plot = TRUE,
  grid = TRUE,
  legacy.axes = TRUE,
  reuse.auc = TRUE,
  print.auc = TRUE,
  print.thres.col = "blue",
  ci.type = "bars",
  print.thres.cex = 0.7,
  main = "ROC curve",
  ylab = "Sensitivity (true positive rate)",
  xlab = "1-Specificity (false positive rate)"
)
## Setting direction: controls > cases
## Setting direction: controls < cases
## Setting direction: controls < cases
##
## Call:
## multiclass.roc.default(response = as.numeric(test.data_lobes$Diagnostic), predictor = as.numeric(predicted.classes.multi.nova0_2.l), percent = F, ci.alpha = 0.9, stratified = FALSE, plot = TRUE, grid = TRUE, legacy.axes = TRUE, reuse.auc = TRUE, print.auc = TRUE, print.thres.col = "blue", ci.type = "bars", print.thres.cex = 0.7, main = "ROC curve", ylab = "Sensitivity (true positive rate)", xlab = "1-Specificity (false positive rate)")
##
## Data: as.numeric(predicted.classes.multi.nova0_2.l) with 3 levels of as.numeric(test.data_lobes$Diagnostic): 1, 2, 3.
## Multi-class area under the curve: 0.746
#BAYES —